import platform
import psutil
import torch
import subprocess
import json
import os

def get_system_info():
    """Collect basic OS and platform details.

    Returns:
        dict: Chinese-labelled entries for OS name/version, platform
        string, CPU model (via get_processor_name) and architecture.
    """
    return {
        "操作系统": platform.system(),
        "操作系统版本": platform.version(),
        "平台": platform.platform(),
        "处理器": get_processor_name(),
        "架构": platform.machine(),
    }

def get_processor_name():
    """Return a human-readable CPU model name.

    On Linux, scans /proc/cpuinfo for the first "model name" entry
    (platform.processor() is often empty or generic there); on any
    other OS, falls back to platform.processor().  Returns the literal
    "未知处理器" when the model cannot be determined.
    """
    if platform.system() != "Linux":
        return platform.processor()
    try:
        with open("/proc/cpuinfo", "r") as cpuinfo:
            model = next(
                (entry.split(":")[1].strip()
                 for entry in cpuinfo
                 if entry.startswith("model name")),
                None,
            )
    except Exception:
        return "未知处理器"
    return model if model is not None else "未知处理器"

def get_cpu_info():
    """Collect CPU core counts, current utilization and clock speed.

    Returns:
        dict: Chinese-labelled entries.  "CPU使用率" is sampled over a
        1-second interval for a meaningful reading.

    Note:
        psutil.cpu_freq() is documented to return None on platforms
        where the frequency cannot be determined (some VMs / ARM
        boards); the original unconditional ``.current`` access would
        raise AttributeError there, so we guard it.
    """
    freq = psutil.cpu_freq()  # may be None — see docstring
    info = {
        "CPU核心数": psutil.cpu_count(logical=False),
        "CPU线程数": psutil.cpu_count(logical=True),
        "CPU使用率": psutil.cpu_percent(interval=1),
        "CPU频率": f"{freq.current / 1000:.2f} GHz" if freq else "未知",
    }
    return info

def get_memory_info():
    """Report total RAM, available RAM and usage percentage.

    Returns:
        dict: Chinese-labelled entries; sizes formatted in GiB with
        two decimals, usage as a percent string.
    """
    gib = 1024 ** 3
    vm = psutil.virtual_memory()
    return {
        "总内存": f"{vm.total / gib:.2f} GB",
        "可用内存": f"{vm.available / gib:.2f} GB",
        "内存使用率": f"{vm.percent}%",
    }

def get_gpu_info():
    """Query NVIDIA GPUs through nvidia-smi.

    Returns:
        list: one dict per GPU (model, total/used/free memory as the
        strings nvidia-smi reports), or a single-element list with an
        explanatory message when no NVIDIA GPU / nvidia-smi is found.

    Fix over the original: the query already requests ``memory.free``
    but the original discarded it and re-derived free memory by
    parsing and subtracting the other two fields — which crashes when
    nvidia-smi reports "[N/A]".  Use the reported value when present
    and keep the subtraction only as a fallback.
    """
    query = [
        'nvidia-smi',
        '--query-gpu=name,memory.total,memory.used,memory.free',
        '--format=csv,noheader',
    ]
    try:
        result = subprocess.run(query, capture_output=True, text=True, check=True)
    except (subprocess.CalledProcessError, FileNotFoundError):
        return ["未检测到NVIDIA GPU或nvidia-smi未安装"]

    gpus = []
    for line in result.stdout.strip().split('\n'):
        parts = line.split(', ')
        if len(parts) < 3:
            continue  # skip malformed rows
        if len(parts) >= 4:
            free = parts[3]  # value nvidia-smi itself reported
        else:
            # Fallback: derive free = total - used (original behavior)
            free = f"{int(parts[1].split()[0]) - int(parts[2].split()[0])} MiB"
        gpus.append({
            "GPU型号": parts[0],
            "总显存": parts[1],
            "已用显存": parts[2],
            "空闲显存": free,
        })
    return gpus

def get_deep_learning_frameworks():
    """Report installed deep-learning framework versions.

    Currently probes PyTorch only.  Returns a dict mapping framework
    name to either a detail dict (version, CUDA/cuDNN versions, GPU
    support flag) or the literal "未安装" when import/introspection
    fails.
    """
    frameworks = {}

    # PyTorch — hoist the repeated availability check into a local.
    try:
        cuda_ok = torch.cuda.is_available()
        frameworks["PyTorch"] = {
            "版本": torch.__version__,
            "CUDA版本": torch.version.cuda if cuda_ok else "未检测到",
            "cuDNN版本": torch.backends.cudnn.version() if cuda_ok else "未检测到",
            "是否支持GPU": "是" if cuda_ok else "否",
        }
    except Exception:
        frameworks["PyTorch"] = "未安装"

    return frameworks

def get_python_info():
    """Describe the running Python interpreter.

    Returns:
        dict: Chinese-labelled version, compiler and implementation
        strings from the platform module.
    """
    return {
        "Python版本": platform.python_version(),
        "Python编译器": platform.python_compiler(),
        "Python实现": platform.python_implementation(),
    }

def main():
    """Gather every environment section, print it, and persist to JSON.

    Writes environment_info.json (UTF-8, non-ASCII preserved) in the
    current working directory as a side effect.
    """
    sections = (
        ("系统信息", get_system_info),
        ("CPU信息", get_cpu_info),
        ("内存信息", get_memory_info),
        ("GPU信息", get_gpu_info),
        ("深度学习框架", get_deep_learning_frameworks),
        ("Python环境", get_python_info),
    )
    env_info = {label: collect() for label, collect in sections}

    # Human-readable dump to stdout.
    print(json.dumps(env_info, indent=4, ensure_ascii=False))

    # Persist a copy next to the script for later reference.
    with open("environment_info.json", "w", encoding="utf-8") as out:
        json.dump(env_info, out, indent=4, ensure_ascii=False)

    print("\n环境信息已保存到 environment_info.json 文件中。")

if __name__ == "__main__":
    main()

# 写文章时大概率需要提供实验环境信息,一个脚本轻松解决