2 changes: 2 additions & 0 deletions src/openai/cli/_api/chat/__init__.py
@@ -4,10 +4,12 @@
from argparse import ArgumentParser

from . import completions
from . import fine_tune

if TYPE_CHECKING:
from argparse import _SubParsersAction


def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
completions.register(subparser)
fine_tune.register(subparser)
80 changes: 80 additions & 0 deletions src/openai/cli/_api/chat/fine_tune.py
@@ -0,0 +1,80 @@
"""Chat fine-tuning commands for OpenAI CLI."""
from __future__ import annotations

import sys
import json
from typing import TYPE_CHECKING
from argparse import ArgumentParser

from ..._utils import get_client
from ..._models import BaseModel

if TYPE_CHECKING:
from argparse import _SubParsersAction


def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
"""Register chat fine-tuning subcommands."""
sub = subparser.add_parser("chat.fine_tune", help="Chat fine-tuning operations")
sub._action_groups.pop()

sub_subparsers = sub.add_subparsers(dest="chat_ft_command")

# Fine-tune a chat model
create_parser = sub_subparsers.add_parser("create", help="Create a chat fine-tuning job")
create_parser.add_argument("--training-file", required=True, help="Training file ID (JSONL format)")
create_parser.add_argument("--model", default="gpt-3.5-turbo", help="Base model to fine-tune")
create_parser.add_argument("--suffix", help="Custom suffix for model name")
create_parser.add_argument("--epochs", type=int, default=3, help="Number of epochs")
create_parser.set_defaults(func=_create_chat_ft)

# List chat fine-tuning jobs
list_parser = sub_subparsers.add_parser("list", help="List chat fine-tuning jobs")
list_parser.add_argument("--limit", type=int, default=10, help="Max jobs to return")
list_parser.set_defaults(func=_list_chat_ft)


class ChatFTCreateArgs(BaseModel):
training_file: str
model: str = "gpt-3.5-turbo"
suffix: str | None = None
epochs: int | None = None


class ChatFTListArgs(BaseModel):
limit: int = 10


def _create_chat_ft(args: ChatFTCreateArgs) -> None:
"""Create a chat fine-tuning job."""
client = get_client()

params = {
"model": args.model,
"training_file": args.training_file,
}

if args.suffix:
params["suffix"] = args.suffix
if args.epochs:
params["hyperparameters"] = {"n_epochs": args.epochs}

job = client.fine_tuning.jobs.create(**params)

print(f"✅ Chat fine-tuning job created: {job.id}")
print(f" Model: {job.model}")
print(f" Status: {job.status}")
print(f"\n💡 To check status: openai chat.fine_tune get {job.id}")


def _list_chat_ft(args: ChatFTListArgs) -> None:
"""List chat fine-tuning jobs."""
client = get_client()
jobs = client.fine_tuning.jobs.list(limit=args.limit)

print(f"{'ID':<50} {'Model':<25} {'Status':<15} {'Trained Tokens'}")
print("-" * 100)

for job in jobs.data:
tokens = job.trained_tokens or 0
print(f"{job.id:<50} {job.model:<25} {job.status:<15} {tokens}")
123 changes: 121 additions & 2 deletions src/openai/cli/_api/fine_tuning/__init__.py
@@ -1,13 +1,132 @@
"""Fine-tuning commands for OpenAI CLI."""
from __future__ import annotations

import sys
import json
from typing import TYPE_CHECKING
from datetime import datetime
from argparse import ArgumentParser

from . import jobs
from ..._utils import get_client
from ..._models import BaseModel

if TYPE_CHECKING:
from argparse import _SubParsersAction


def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
jobs.register(subparser)
"""Register fine-tuning subcommands."""
sub = subparser.add_parser("fine_tune", help="Fine-tuning operations")
sub._action_groups.pop()

sub_subparsers = sub.add_subparsers(dest="fine_tune_command")

# List fine-tuning jobs
list_parser = sub_subparsers.add_parser("list", help="List fine-tuning jobs")
list_parser.add_argument("--limit", type=int, default=10, help="Max jobs to return")
list_parser.set_defaults(func=_list_jobs)

# Create fine-tuning job
create_parser = sub_subparsers.add_parser("create", help="Create a fine-tuning job")
create_parser.add_argument("--training-file", required=True, help="Training file ID")
create_parser.add_argument("--model", required=True, help="Base model to fine-tune")
create_parser.add_argument("--suffix", help="Custom suffix for model name")
create_parser.add_argument("--epochs", type=int, default=3, help="Number of epochs")
create_parser.add_argument("--batch-size", type=int, help="Batch size")
create_parser.add_argument("--learning-rate", type=float, help="Learning rate multiplier")
create_parser.set_defaults(func=_create_job)

# Get fine-tuning job status
get_parser = sub_subparsers.add_parser("get", help="Get fine-tuning job status")
get_parser.add_argument("job_id", help="Fine-tuning job ID")
get_parser.set_defaults(func=_get_job)

# Cancel fine-tuning job
cancel_parser = sub_subparsers.add_parser("cancel", help="Cancel a fine-tuning job")
cancel_parser.add_argument("job_id", help="Fine-tuning job ID")
cancel_parser.set_defaults(func=_cancel_job)


class FineTuneListArgs(BaseModel):
limit: int = 10


class FineTuneCreateArgs(BaseModel):
training_file: str
model: str
suffix: str | None = None
epochs: int | None = None
batch_size: int | None = None
learning_rate_multiplier: float | None = None


class FineTuneGetArgs(BaseModel):
job_id: str


class FineTuneCancelArgs(BaseModel):
job_id: str


def _list_jobs(args: FineTuneListArgs) -> None:
"""List fine-tuning jobs."""
client = get_client()
jobs = client.fine_tuning.jobs.list(limit=args.limit)

print(f"{'ID':<50} {'Model':<30} {'Status':<15} {'Created'}")
print("-" * 120)

for job in jobs.data:
        # created_at is a Unix timestamp (int), so convert before formatting
        created = datetime.fromtimestamp(job.created_at).strftime("%Y-%m-%d %H:%M") if job.created_at else "N/A"
print(f"{job.id:<50} {job.model:<30} {job.status:<15} {created}")


def _create_job(args: FineTuneCreateArgs) -> None:
"""Create a fine-tuning job."""
client = get_client()

params = {
"model": args.model,
"training_file": args.training_file,
}

if args.suffix:
params["suffix"] = args.suffix
if args.epochs:
params["hyperparameters"] = {"n_epochs": args.epochs}
if args.batch_size:
if "hyperparameters" not in params:
params["hyperparameters"] = {}
params["hyperparameters"]["batch_size"] = args.batch_size
    if args.learning_rate_multiplier:
        if "hyperparameters" not in params:
            params["hyperparameters"] = {}
        params["hyperparameters"]["learning_rate_multiplier"] = args.learning_rate_multiplier

job = client.fine_tuning.jobs.create(**params)

print(f"✅ Fine-tuning job created: {job.id}")
print(f" Model: {job.model}")
print(f" Status: {job.status}")


def _get_job(args: FineTuneGetArgs) -> None:
"""Get fine-tuning job status."""
client = get_client()
job = client.fine_tuning.jobs.retrieve(args.job_id)

print(f"Fine-tuning Job: {job.id}")
print(f" Model: {job.model}")
print(f" Status: {job.status}")
print(f" Trained tokens: {job.trained_tokens or 0}")
print(f" Created: {job.created_at}")
print(f" Finished at: {job.finished_at}")

if job.error:
print(f" Error: {job.error}")


def _cancel_job(args: FineTuneCancelArgs) -> None:
"""Cancel a fine-tuning job."""
client = get_client()
job = client.fine_tuning.jobs.cancel(args.job_id)
print(f"✅ Fine-tuning job cancelled: {job.id}")
87 changes: 87 additions & 0 deletions src/openai_cli/completion.py
@@ -0,0 +1,87 @@
"""Shell completion for OpenAI CLI."""
import sys
import argparse

def install_completion(shell: str) -> int:
    """Print the completion script for the given shell; returns an exit code."""
completions = {
'bash': '''# openai bash completion
_openai() {
local cur prev words cword
_init_completion || return
case $prev in
openai)
COMPREPLY=($(compgen -W "api chat models files embeddings completions --" -- "$cur"))
return
;;
--api-key|--api-key=)
return
;;
--model|--model=)
_filedir
return
;;
-o|--organization)
return
;;
esac
COMPREPLY=($(compgen -W "$(openai --help 2>&1 | grep -E '^ [a-z]' | awk '{print $1}' | tr '\\n' ' ')" -- "$cur"))
}
complete -F _openai openai''',
'zsh': '''# openai zsh completion
autoload -U compinit
compdef _openai openai

_openai() {
local -a commands
commands=(
'api:Direct API calls'
'chat:Chat completions'
'models:List models'
'files:File operations'
'embeddings:Embedding operations'
'completions:Completion operations'
)
_describe -t commands 'openai command' commands
}
''',
}
    if shell not in completions:
        print(f"Error: Unknown shell {shell}. Supported shells: bash, zsh", file=sys.stderr)
        return 1
    print(completions[shell])
    return 0

def generate_completions() -> str:
"""Generate completion script dynamically based on available commands."""
# Get subcommands from CLI
commands = ["api", "chat", "models", "files", "embeddings", "completions", "fine_tuning"]
options = ["--api-key", "--model", "--organization", "--verbose", "--help"]

bash_completion = f'''# openai bash completion (auto-generated)
_openai() {{
local cur prev words cword
_init_completion || return
case $prev in
openai)
COMPREPLY=($(compgen -W "{' '.join(commands)}" -- "$cur"))
return
;;
${{words[0]}})
COMPREPLY=($(compgen -W "{' '.join(options)}" -- "$cur"))
return
;;
esac
_filedir
}} &&
complete -F _openai openai
'''
return bash_completion

if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate shell completion scripts for OpenAI CLI')
parser.add_argument('--shell', choices=['bash', 'zsh'], default='bash', help='Shell type')
parser.add_argument('--print', action='store_true', help='Print completions to stdout')
args = parser.parse_args()

    if args.print:
        print(generate_completions())
    else:
        sys.exit(install_completion(args.shell))
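Note on trying it out: one possible way to exercise generate_completions() locally is to write its output to a file and source that file from ~/.bashrc. The target path below is an arbitrary choice for the example, not something this PR installs.

# Illustrative only: persist the generated bash completion script so it can be
# sourced from the user's shell startup file. The path is made up for the example.
from pathlib import Path
from openai_cli.completion import generate_completions

target = Path.home() / ".openai-completion.bash"
target.write_text(generate_completions())
print(f"Add 'source {target}' to your ~/.bashrc to enable completion.")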