To err is human; to blame it on someone else is even more human. — Jacob’s Law

While graphical user interfaces (GUIs) are common, command-line interfaces (CLIs) remain a very efficient way to interact with computers, especially for tasks like automation, AI, and system administration. When you add AI capabilities — like Ollama’s local LLM engine — having a robust set of UI helpers makes a world of difference. In this article, we’ll walk through a Python module that provides:
Open the module in your editor, e.g. `nvim util.py`:
# -*- coding: utf-8 -*-
import os
import platform
import random
import re
import subprocess
from datetime import datetime

import ollama  # For interacting with the Ollama API
from colorama import Fore, Style, init
from quote import quote  # quote(topic, limit=...) fetches quotes by topic
from rich.console import Console
from rich.panel import Panel
from rich.progress import BarColumn, Progress, SpinnerColumn, TextColumn
from rich.text import Text
# Initialize colorama for cross-platform colored output
init(autoreset=True)
def normalize_string(s: str) -> str:
    """Collapse every run of whitespace to a single space and lowercase.

    Note: leading/trailing whitespace is collapsed to one space, not removed.

    Args:
        s (str): The input string to normalize.

    Returns:
        str: A normalized string suitable for case- and
            whitespace-insensitive comparison.
    """
    whitespace_run = re.compile(r"\s+")
    return whitespace_run.sub(" ", s).lower()
def print_color(color: str, text: str) -> None:
    """Print *text* wrapped in a raw ANSI SGR color escape sequence.

    Args:
        color (str): The ANSI color code (e.g. "31" for red).
        text (str): The text to print.

    Returns:
        None
    """
    esc_open = f"\033[{color}m"
    esc_reset = "\033[0m"
    print(esc_open + text + esc_reset)
def display_message(text: str) -> None:
    """Print *text* framed by magenta '=' borders and log it to a daily file.

    NOTE(review): a second ``display_message`` defined later in this module
    shadows this implementation at import time.

    Args:
        text (str): The message text to display.

    Returns:
        None
    """
    border = f"{Fore.MAGENTA}{'='*50}{Style.RESET_ALL}"
    print(f"\n{border}")
    print(f"{Fore.YELLOW}{text}{Style.RESET_ALL}")
    print(border)
    # Append the plain message to a file named after today's date,
    # e.g. "11-07-2025.txt".
    today = datetime.now().strftime("%d-%m-%Y")
    with open(f"{today}.txt", "a", encoding="utf-8") as log_file:
        log_file.write(text + "\n")
# An alternative implementation
def display_message(text: str) -> None:
    """Print *text* framed by magenta '=' borders and append it to my_output.md.

    Alternative implementation: logs to a fixed markdown file instead of a
    per-day text file.

    Args:
        text (str): The message text to display.

    Returns:
        None
    """
    ruler = "=" * 50
    print(f"\n{Fore.MAGENTA}{ruler}{Style.RESET_ALL}")
    print(f"{Fore.YELLOW}{text}{Style.RESET_ALL}")
    print(f"{Fore.MAGENTA}{ruler}{Style.RESET_ALL}")
    # Persist the message (followed by a period, no newline) to the markdown log.
    with open("my_output.md", "a", encoding="utf-8") as md_file:
        md_file.write(f'{text}.')
def clear_output_markdown() -> None:
    """Truncate the my_output.md log file to zero length."""
    # Opening in "w" mode truncates the file; nothing needs to be written.
    with open("my_output.md", "w", encoding="utf-8"):
        pass
def display_text_color(text: str, color: str = Fore.GREEN) -> None:
    """Print *text* in the given color and append it to today's log file.

    NOTE(review): a second ``display_text_color`` defined later in this
    module shadows this implementation at import time.

    Args:
        text (str): The text to display.
        color (str): The ANSI color code for the text (default is green).

    Returns:
        None
    """
    print(f"{color}{text}{Style.RESET_ALL}")
    # Append the uncolored text to a file named after today's date,
    # e.g. "11-07-2025.txt".
    today = datetime.now().strftime("%d-%m-%Y")
    with open(f"{today}.txt", "a", encoding="utf-8") as log_file:
        log_file.write(text + "\n")
# An alternative implementation
def display_text_color(text: str = "Welcome to our CLI Assistant", color: str = Fore.GREEN) -> None:
    """Print *text* in the given color and append it to my_output.md.

    Alternative implementation: logs to a fixed markdown file instead of a
    per-day text file.

    Args:
        text (str): The text to display (defaults to a welcome banner).
        color (str): The ANSI color code for the text (default is green).

    Returns:
        None
    """
    print(f"{color}{text}{Style.RESET_ALL}")
    # BUG FIX: the original branched on `color == Fore.GREEN` but both
    # branches wrote the exact same thing — the conditional was dead code,
    # so a single unconditional write is equivalent and clearer.
    with open("my_output.md", "a", encoding="utf-8") as md_file:
        md_file.write(f'{text}.')
def display_alarm_color(
    text: str,
    color: str = Fore.RED,
    emoji: str = "⏰"
) -> None:
    """Print an attention-grabbing alarm message, prefixed with an emoji.

    Args:
        text (str): The alarm message to display.
        color (str): The ANSI color code for the text (default is red).
        emoji (str): Emoji prefix (default is the alarm-clock emoji).

    Returns:
        None
    """
    alarm_line = f"{color}{emoji} {text}{Style.RESET_ALL}"
    print(alarm_line)
def display_log_color(
    text: str,
    color: str = Fore.GREEN,
    level: str = "INFO"
) -> None:
    """Print a timestamped, colored log line for easy visual scanning.

    Args:
        text (str): The log message to display.
        color (str): The ANSI color code for the text (default is green).
        level (str): The log level label (default is "INFO").

    Returns:
        None
    """
    # Timestamp in "YYYY-MM-DD HH:MM:SS" format.
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    log_line = f"{color}{stamp} [{level}] {text}{Style.RESET_ALL}"
    print(log_line)
def display_chunck(text: str, color: str = Fore.GREEN) -> None:
    """Print colored text without a trailing newline (for streamed chunks).

    NOTE(review): "chunck" is a typo for "chunk"; the name is kept so
    existing callers keep working.

    Args:
        text (str): The text to display.
        color (str): The ANSI color code for the text (default is green).

    Returns:
        None
    """
    colored_chunk = f"{color}{text}{Style.RESET_ALL}"
    # Flush immediately so streamed output appears without buffering delay.
    print(colored_chunk, end="", flush=True)
def chat_with_model(model_name, messages):
    """Get a response from the model and record it in the conversation.

    Args:
        model_name (str): Name of the model to use.
        messages (list): Conversation history; on success the assistant's
            reply is appended in place.

    Returns:
        str | None: The complete response text, or None if an error occurred.
    """
    try:
        # Ask the model for the next turn of the conversation.
        reply = ollama.chat(model=model_name, messages=messages)
        answer = reply.message.content
    except Exception as exc:
        # Report the failure to the user and signal it with a None return.
        display_text_color(f"An error occurred while communicating with the model: {str(exc)}", Fore.RED)
        return None
    # Keep the conversation history up to date with the assistant's turn.
    messages.append({"role": "assistant", "content": answer})
    return answer
def call_ollama(content, system_prompt, model_name="deepseek-r1:8b", role="user", temperature=0.7, max_tokens=10000):
    """Call the Ollama API with the provided content and options.

    Args:
        content (str): The content to send to the model.
        system_prompt (dict): The system prompt; must contain 'role' and
            'content' keys.
        model_name (str): The name of the model to use. Default is
            "deepseek-r1:8b".
        role (str): The role attached to the user message. Default is "user".
        temperature (float): Sampling temperature between 0 and 1.
            Default is 0.7.
        max_tokens (int): Maximum number of tokens to generate.
            Default is 10000.

    Returns:
        str: The content of the model's response.

    Raises:
        ValueError: If any argument fails validation.
        RuntimeError: If the Ollama call itself fails.
    """
    # --- Input validation ---
    if not isinstance(content, str):
        raise ValueError("content must be a string")
    if not model_name or not isinstance(model_name, str):
        raise ValueError("model_name must be a non-empty string")
    if not isinstance(temperature, (int, float)) or temperature < 0 or temperature > 1:
        raise ValueError("temperature must be a float between 0 and 1")
    if not isinstance(max_tokens, int) or max_tokens <= 0:
        raise ValueError("max_tokens must be a positive integer")
    if not isinstance(system_prompt, dict) or 'role' not in system_prompt or 'content' not in system_prompt:
        raise ValueError("system_prompt must be a dictionary with 'role' and 'content' keys")

    # System prompt first, then the user message with the provided content.
    messages = [
        system_prompt,
        {"role": role, "content": content},
    ]

    # Transient spinner shown while waiting on the model.
    # BUG FIX: the original listed BarColumn() here, but BarColumn was never
    # imported, so every call raised NameError. The spinner is also started
    # by default now instead of the redundant start=False / start_task pair.
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        transient=True,  # Remove the spinner once the call completes
    ) as progress:
        progress.add_task(f"Calling Ollama {model_name}…")
        try:
            display_text_color("Calling Ollama API...", Fore.BLACK)
            # Call the Ollama API with the chosen model, messages and options.
            response = ollama.chat(
                model=model_name,
                messages=messages,
                options={
                    "temperature": temperature,
                    "num_predict": max_tokens,
                },
            )
            # Record the assistant's reply in the conversation history.
            messages.append({"role": "assistant", "content": response.message.content})
            # Show the response under a labelled horizontal ruler.
            text_horizontalRuler(f"Response from model {model_name}")
            display_text_color(response.message.content, Fore.GREEN)
            return response.message.content
        except Exception as e:
            print(f"Ollama chat failed: {e}")
            # Chain the original exception so the full traceback is preserved.
            raise RuntimeError(f"Ollama chat failed: {e}") from e
def text_horizontalRuler(text: str) -> None:
    """Print *text* in magenta, followed by a full-width horizontal rule.

    Args:
        text (str): The text to display above the ruler.

    Returns:
        None
    """
    out = Console()
    # Magenta label line, then a rule spanning the terminal width.
    out.print(text, style="magenta")
    out.rule()
def clear_terminal():
    """Clear the terminal screen in a platform-appropriate way.

    Returns:
        None
    """
    # BUG FIX: the original referenced `platform` and `subprocess` without
    # ever importing them, so every call raised NameError. Local imports
    # keep this function self-contained.
    import platform
    import subprocess

    if platform.system() == "Windows":
        # "cls" is a cmd.exe builtin, so it needs shell=True.
        subprocess.run("cls", shell=True, check=False)
    else:
        subprocess.run("clear", shell=True, check=False)
def print_header(title: str) -> None:
    """Clear the terminal and display a centred header inside a Rich panel.

    Args:
        title (str): The title to display in the header.

    Returns:
        None
    """
    console = Console()
    console.clear()      # Rich-level clear
    clear_terminal()     # OS-level clear as well
    # Header shows the title on one line and the current time below it.
    stamp = datetime.now().strftime("%H:%M:%S")
    banner = Text(f"{title}\n{stamp}", style="bold cyan", justify="center")
    console.print(Panel(banner, border_style="bright_magenta", padding=(1, 4)))
def select_quote():
    """Fetch and display up to five quotes on a randomly chosen topic.

    Returns:
        list: The list of quote dicts returned by the quote library
            (each with 'quote', 'author' and 'book' keys).
    """
    # Pool of topics to draw from.
    topics = ["life", "love", "inspiration", "wisdom", "humor", "motivation", "maths",
              "coding", "linux", "software", "programming"]
    topic = random.choice(topics)
    # Tell the user what we are about to fetch.
    display_text_color(f"Fetching a quote on {topic}...", Fore.BLACK)
    # Ask the quote library for up to five quotes on the topic.
    myquotes = quote(topic, limit=5)
    # Show each quote with its author and source book.
    for entry in myquotes:
        display_text_color(
            f"{entry['quote']}\n -- {entry['author']} ({entry['book']})\n", Fore.CYAN)
    return myquotes
if __name__ == "__main__":
    # Quick manual smoke test of the helpers above.
    sample = normalize_string(" Example String ")
    print(sample)
    display_message("This is a message with a border.")
    display_text_color("This is a colored message.", Fore.CYAN)
    display_alarm_color("This is a colored message.", Fore.CYAN)
    print_header("My header")