commit b9c99befab (parent aaf9ab523b)
2025-04-06 03:14:47 +02:00
2263 changed files with 401112 additions and 20 deletions


@@ -0,0 +1,165 @@
# Copyright (C) 2003-2011 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# flake8: noqa
import sys
from paramiko._version import __version__, __version_info__
from paramiko.transport import (
SecurityOptions,
ServiceRequestingTransport,
Transport,
)
from paramiko.client import (
AutoAddPolicy,
MissingHostKeyPolicy,
RejectPolicy,
SSHClient,
WarningPolicy,
)
from paramiko.auth_handler import AuthHandler
from paramiko.auth_strategy import (
AuthFailure,
AuthStrategy,
AuthResult,
AuthSource,
InMemoryPrivateKey,
NoneAuth,
OnDiskPrivateKey,
Password,
PrivateKey,
SourceResult,
)
from paramiko.ssh_gss import GSSAuth, GSS_AUTH_AVAILABLE, GSS_EXCEPTIONS
from paramiko.channel import (
Channel,
ChannelFile,
ChannelStderrFile,
ChannelStdinFile,
)
from paramiko.ssh_exception import (
AuthenticationException,
BadAuthenticationType,
BadHostKeyException,
ChannelException,
ConfigParseError,
CouldNotCanonicalize,
IncompatiblePeer,
MessageOrderError,
PasswordRequiredException,
ProxyCommandFailure,
SSHException,
)
from paramiko.server import ServerInterface, SubsystemHandler, InteractiveQuery
from paramiko.rsakey import RSAKey
from paramiko.dsskey import DSSKey
from paramiko.ecdsakey import ECDSAKey
from paramiko.ed25519key import Ed25519Key
from paramiko.sftp import SFTPError, BaseSFTP
from paramiko.sftp_client import SFTP, SFTPClient
from paramiko.sftp_server import SFTPServer
from paramiko.sftp_attr import SFTPAttributes
from paramiko.sftp_handle import SFTPHandle
from paramiko.sftp_si import SFTPServerInterface
from paramiko.sftp_file import SFTPFile
from paramiko.message import Message
from paramiko.packet import Packetizer
from paramiko.file import BufferedFile
from paramiko.agent import Agent, AgentKey
from paramiko.pkey import PKey, PublicBlob, UnknownKeyType
from paramiko.hostkeys import HostKeys
from paramiko.config import SSHConfig, SSHConfigDict
from paramiko.proxy import ProxyCommand
from paramiko.common import (
AUTH_SUCCESSFUL,
AUTH_PARTIALLY_SUCCESSFUL,
AUTH_FAILED,
OPEN_SUCCEEDED,
OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED,
OPEN_FAILED_CONNECT_FAILED,
OPEN_FAILED_UNKNOWN_CHANNEL_TYPE,
OPEN_FAILED_RESOURCE_SHORTAGE,
)
from paramiko.sftp import (
SFTP_OK,
SFTP_EOF,
SFTP_NO_SUCH_FILE,
SFTP_PERMISSION_DENIED,
SFTP_FAILURE,
SFTP_BAD_MESSAGE,
SFTP_NO_CONNECTION,
SFTP_CONNECTION_LOST,
SFTP_OP_UNSUPPORTED,
)
from paramiko.common import io_sleep
# TODO: I guess a real plugin system might be nice for future expansion...
key_classes = [DSSKey, RSAKey, Ed25519Key, ECDSAKey]
__author__ = "Jeff Forcier <jeff@bitprophet.org>"
__license__ = "GNU Lesser General Public License (LGPL)"
# TODO 4.0: remove this, jeez
__all__ = [
"Agent",
"AgentKey",
"AuthenticationException",
"AutoAddPolicy",
"BadAuthenticationType",
"BadHostKeyException",
"BufferedFile",
"Channel",
"ChannelException",
"ConfigParseError",
"CouldNotCanonicalize",
"DSSKey",
"ECDSAKey",
"Ed25519Key",
"HostKeys",
"Message",
"MissingHostKeyPolicy",
"PKey",
"PasswordRequiredException",
"ProxyCommand",
"ProxyCommandFailure",
"RSAKey",
"RejectPolicy",
"SFTP",
"SFTPAttributes",
"SFTPClient",
"SFTPError",
"SFTPFile",
"SFTPHandle",
"SFTPServer",
"SFTPServerInterface",
"SSHClient",
"SSHConfig",
"SSHConfigDict",
"SSHException",
"SecurityOptions",
"ServerInterface",
"SubsystemHandler",
"Transport",
"WarningPolicy",
"io_sleep",
"util",
]
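The module above mainly re-exports paramiko's public names; as a sanity check of that surface, here is a minimal, hedged usage sketch built only from those exports (host, username, and key path are placeholders, not values from this changeset):

import os
import paramiko

client = paramiko.SSHClient()
client.load_system_host_keys()
# Unknown hosts are rejected by default (RejectPolicy); opt in to auto-adding.
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(
    "ssh.example.com",  # placeholder host
    username="demo",
    key_filename=os.path.expanduser("~/.ssh/id_ed25519"),
)
stdin, stdout, stderr = client.exec_command("uname -a")
print(stdout.read().decode())
client.close()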


@@ -0,0 +1,2 @@
__version_info__ = (3, 5, 1)
__version__ = ".".join(map(str, __version_info__))
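For reference, the tuple/string pair defined above is what consumers see at runtime:

import paramiko

print(paramiko.__version_info__)  # (3, 5, 1) in this snapshot
print(paramiko.__version__)       # "3.5.1"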


@@ -0,0 +1,413 @@
"""
Windows API functions implemented as ctypes functions and classes as found
in jaraco.windows (3.4.1).
If you encounter issues with this module, please consider reporting the issues
in jaraco.windows and asking the author to port the fixes back here.
"""
import builtins
import ctypes.wintypes
from paramiko.util import u
######################
# jaraco.windows.error
def format_system_message(errno):
"""
Call FormatMessage with a system error number to retrieve
the descriptive error message.
"""
# first some flags used by FormatMessageW
ALLOCATE_BUFFER = 0x100
FROM_SYSTEM = 0x1000
# Let FormatMessageW allocate the buffer (we'll free it below)
# Also, let it know we want a system error message.
flags = ALLOCATE_BUFFER | FROM_SYSTEM
source = None
message_id = errno
language_id = 0
result_buffer = ctypes.wintypes.LPWSTR()
buffer_size = 0
arguments = None
bytes = ctypes.windll.kernel32.FormatMessageW(
flags,
source,
message_id,
language_id,
ctypes.byref(result_buffer),
buffer_size,
arguments,
)
# note the following will cause an infinite loop if GetLastError
# repeatedly returns an error that cannot be formatted, although
# this should not happen.
handle_nonzero_success(bytes)
message = result_buffer.value
ctypes.windll.kernel32.LocalFree(result_buffer)
return message
class WindowsError(builtins.WindowsError):
"""more info about errors at
http://msdn.microsoft.com/en-us/library/ms681381(VS.85).aspx"""
def __init__(self, value=None):
if value is None:
value = ctypes.windll.kernel32.GetLastError()
strerror = format_system_message(value)
args = 0, strerror, None, value
super().__init__(*args)
@property
def message(self):
return self.strerror
@property
def code(self):
return self.winerror
def __str__(self):
return self.message
def __repr__(self):
return "{self.__class__.__name__}({self.winerror})".format(**vars())
def handle_nonzero_success(result):
if result == 0:
raise WindowsError()
###########################
# jaraco.windows.api.memory
GMEM_MOVEABLE = 0x2
GlobalAlloc = ctypes.windll.kernel32.GlobalAlloc
GlobalAlloc.argtypes = ctypes.wintypes.UINT, ctypes.c_size_t
GlobalAlloc.restype = ctypes.wintypes.HANDLE
GlobalLock = ctypes.windll.kernel32.GlobalLock
GlobalLock.argtypes = (ctypes.wintypes.HGLOBAL,)
GlobalLock.restype = ctypes.wintypes.LPVOID
GlobalUnlock = ctypes.windll.kernel32.GlobalUnlock
GlobalUnlock.argtypes = (ctypes.wintypes.HGLOBAL,)
GlobalUnlock.restype = ctypes.wintypes.BOOL
GlobalSize = ctypes.windll.kernel32.GlobalSize
GlobalSize.argtypes = (ctypes.wintypes.HGLOBAL,)
GlobalSize.restype = ctypes.c_size_t
CreateFileMapping = ctypes.windll.kernel32.CreateFileMappingW
CreateFileMapping.argtypes = [
ctypes.wintypes.HANDLE,
ctypes.c_void_p,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
ctypes.wintypes.LPWSTR,
]
CreateFileMapping.restype = ctypes.wintypes.HANDLE
MapViewOfFile = ctypes.windll.kernel32.MapViewOfFile
MapViewOfFile.restype = ctypes.wintypes.HANDLE
UnmapViewOfFile = ctypes.windll.kernel32.UnmapViewOfFile
UnmapViewOfFile.argtypes = (ctypes.wintypes.HANDLE,)
RtlMoveMemory = ctypes.windll.kernel32.RtlMoveMemory
RtlMoveMemory.argtypes = (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)
ctypes.windll.kernel32.LocalFree.argtypes = (ctypes.wintypes.HLOCAL,)
#####################
# jaraco.windows.mmap
class MemoryMap:
"""
A memory map object which can have security attributes overridden.
"""
def __init__(self, name, length, security_attributes=None):
self.name = name
self.length = length
self.security_attributes = security_attributes
self.pos = 0
def __enter__(self):
p_SA = (
ctypes.byref(self.security_attributes)
if self.security_attributes
else None
)
INVALID_HANDLE_VALUE = -1
PAGE_READWRITE = 0x4
FILE_MAP_WRITE = 0x2
filemap = ctypes.windll.kernel32.CreateFileMappingW(
INVALID_HANDLE_VALUE,
p_SA,
PAGE_READWRITE,
0,
self.length,
u(self.name),
)
handle_nonzero_success(filemap)
if filemap == INVALID_HANDLE_VALUE:
raise Exception("Failed to create file mapping")
self.filemap = filemap
self.view = MapViewOfFile(filemap, FILE_MAP_WRITE, 0, 0, 0)
return self
def seek(self, pos):
self.pos = pos
def write(self, msg):
assert isinstance(msg, bytes)
n = len(msg)
if self.pos + n >= self.length: # A little safety.
raise ValueError(f"Refusing to write {n} bytes")
dest = self.view + self.pos
length = ctypes.c_size_t(n)
ctypes.windll.kernel32.RtlMoveMemory(dest, msg, length)
self.pos += n
def read(self, n):
"""
Read n bytes from mapped view.
"""
out = ctypes.create_string_buffer(n)
source = self.view + self.pos
length = ctypes.c_size_t(n)
ctypes.windll.kernel32.RtlMoveMemory(out, source, length)
self.pos += n
return out.raw
def __exit__(self, exc_type, exc_val, tb):
ctypes.windll.kernel32.UnmapViewOfFile(self.view)
ctypes.windll.kernel32.CloseHandle(self.filemap)
#############################
# jaraco.windows.api.security
# from WinNT.h
READ_CONTROL = 0x00020000
STANDARD_RIGHTS_REQUIRED = 0x000F0000
STANDARD_RIGHTS_READ = READ_CONTROL
STANDARD_RIGHTS_WRITE = READ_CONTROL
STANDARD_RIGHTS_EXECUTE = READ_CONTROL
STANDARD_RIGHTS_ALL = 0x001F0000
# from NTSecAPI.h
POLICY_VIEW_LOCAL_INFORMATION = 0x00000001
POLICY_VIEW_AUDIT_INFORMATION = 0x00000002
POLICY_GET_PRIVATE_INFORMATION = 0x00000004
POLICY_TRUST_ADMIN = 0x00000008
POLICY_CREATE_ACCOUNT = 0x00000010
POLICY_CREATE_SECRET = 0x00000020
POLICY_CREATE_PRIVILEGE = 0x00000040
POLICY_SET_DEFAULT_QUOTA_LIMITS = 0x00000080
POLICY_SET_AUDIT_REQUIREMENTS = 0x00000100
POLICY_AUDIT_LOG_ADMIN = 0x00000200
POLICY_SERVER_ADMIN = 0x00000400
POLICY_LOOKUP_NAMES = 0x00000800
POLICY_NOTIFICATION = 0x00001000
POLICY_ALL_ACCESS = (
STANDARD_RIGHTS_REQUIRED
| POLICY_VIEW_LOCAL_INFORMATION
| POLICY_VIEW_AUDIT_INFORMATION
| POLICY_GET_PRIVATE_INFORMATION
| POLICY_TRUST_ADMIN
| POLICY_CREATE_ACCOUNT
| POLICY_CREATE_SECRET
| POLICY_CREATE_PRIVILEGE
| POLICY_SET_DEFAULT_QUOTA_LIMITS
| POLICY_SET_AUDIT_REQUIREMENTS
| POLICY_AUDIT_LOG_ADMIN
| POLICY_SERVER_ADMIN
| POLICY_LOOKUP_NAMES
)
POLICY_READ = (
STANDARD_RIGHTS_READ
| POLICY_VIEW_AUDIT_INFORMATION
| POLICY_GET_PRIVATE_INFORMATION
)
POLICY_WRITE = (
STANDARD_RIGHTS_WRITE
| POLICY_TRUST_ADMIN
| POLICY_CREATE_ACCOUNT
| POLICY_CREATE_SECRET
| POLICY_CREATE_PRIVILEGE
| POLICY_SET_DEFAULT_QUOTA_LIMITS
| POLICY_SET_AUDIT_REQUIREMENTS
| POLICY_AUDIT_LOG_ADMIN
| POLICY_SERVER_ADMIN
)
POLICY_EXECUTE = (
STANDARD_RIGHTS_EXECUTE
| POLICY_VIEW_LOCAL_INFORMATION
| POLICY_LOOKUP_NAMES
)
class TokenAccess:
TOKEN_QUERY = 0x8
class TokenInformationClass:
TokenUser = 1
class TOKEN_USER(ctypes.Structure):
num = 1
_fields_ = [
("SID", ctypes.c_void_p),
("ATTRIBUTES", ctypes.wintypes.DWORD),
]
class SECURITY_DESCRIPTOR(ctypes.Structure):
"""
typedef struct _SECURITY_DESCRIPTOR
{
UCHAR Revision;
UCHAR Sbz1;
SECURITY_DESCRIPTOR_CONTROL Control;
PSID Owner;
PSID Group;
PACL Sacl;
PACL Dacl;
} SECURITY_DESCRIPTOR;
"""
SECURITY_DESCRIPTOR_CONTROL = ctypes.wintypes.USHORT
REVISION = 1
_fields_ = [
("Revision", ctypes.c_ubyte),
("Sbz1", ctypes.c_ubyte),
("Control", SECURITY_DESCRIPTOR_CONTROL),
("Owner", ctypes.c_void_p),
("Group", ctypes.c_void_p),
("Sacl", ctypes.c_void_p),
("Dacl", ctypes.c_void_p),
]
class SECURITY_ATTRIBUTES(ctypes.Structure):
"""
typedef struct _SECURITY_ATTRIBUTES {
DWORD nLength;
LPVOID lpSecurityDescriptor;
BOOL bInheritHandle;
} SECURITY_ATTRIBUTES;
"""
_fields_ = [
("nLength", ctypes.wintypes.DWORD),
("lpSecurityDescriptor", ctypes.c_void_p),
("bInheritHandle", ctypes.wintypes.BOOL),
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.nLength = ctypes.sizeof(SECURITY_ATTRIBUTES)
@property
def descriptor(self):
return self._descriptor
@descriptor.setter
def descriptor(self, value):
self._descriptor = value
self.lpSecurityDescriptor = ctypes.addressof(value)
ctypes.windll.advapi32.SetSecurityDescriptorOwner.argtypes = (
ctypes.POINTER(SECURITY_DESCRIPTOR),
ctypes.c_void_p,
ctypes.wintypes.BOOL,
)
#########################
# jaraco.windows.security
def GetTokenInformation(token, information_class):
"""
Given a token, get the token information for it.
"""
data_size = ctypes.wintypes.DWORD()
ctypes.windll.advapi32.GetTokenInformation(
token, information_class.num, 0, 0, ctypes.byref(data_size)
)
data = ctypes.create_string_buffer(data_size.value)
handle_nonzero_success(
ctypes.windll.advapi32.GetTokenInformation(
token,
information_class.num,
ctypes.byref(data),
ctypes.sizeof(data),
ctypes.byref(data_size),
)
)
return ctypes.cast(data, ctypes.POINTER(TOKEN_USER)).contents
def OpenProcessToken(proc_handle, access):
result = ctypes.wintypes.HANDLE()
proc_handle = ctypes.wintypes.HANDLE(proc_handle)
handle_nonzero_success(
ctypes.windll.advapi32.OpenProcessToken(
proc_handle, access, ctypes.byref(result)
)
)
return result
def get_current_user():
"""
Return a TOKEN_USER for the owner of this process.
"""
process = OpenProcessToken(
ctypes.windll.kernel32.GetCurrentProcess(), TokenAccess.TOKEN_QUERY
)
return GetTokenInformation(process, TOKEN_USER)
def get_security_attributes_for_user(user=None):
"""
Return a SECURITY_ATTRIBUTES structure with the SID set to the
specified user (uses current user if none is specified).
"""
if user is None:
user = get_current_user()
assert isinstance(user, TOKEN_USER), "user must be TOKEN_USER instance"
SD = SECURITY_DESCRIPTOR()
SA = SECURITY_ATTRIBUTES()
# by attaching the actual security descriptor, it will be garbage-
# collected with the security attributes
SA.descriptor = SD
SA.bInheritHandle = 1
ctypes.windll.advapi32.InitializeSecurityDescriptor(
ctypes.byref(SD), SECURITY_DESCRIPTOR.REVISION
)
ctypes.windll.advapi32.SetSecurityDescriptorOwner(
ctypes.byref(SD), user.SID, 0
)
return SA
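A hedged, Windows-only sketch of how the pieces above compose, assuming this is paramiko's internal ``_winapi`` helper module (the file path is not shown in this diff); the mapping name and size are placeholders:

import sys

if sys.platform == "win32":
    from paramiko._winapi import MemoryMap, get_security_attributes_for_user

    # Restrict the shared-memory section to the current user.
    sa = get_security_attributes_for_user()
    with MemoryMap("paramiko-demo-map", 64, security_attributes=sa) as mm:
        mm.write(b"hello")
        mm.seek(0)
        print(mm.read(5))  # b'hello'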


@@ -0,0 +1,497 @@
# Copyright (C) 2003-2007 John Rochester <john@jrochester.org>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
SSH Agent interface
"""
import os
import socket
import struct
import sys
import threading
import time
import tempfile
import stat
from logging import DEBUG
from select import select
from paramiko.common import io_sleep, byte_chr
from paramiko.ssh_exception import SSHException, AuthenticationException
from paramiko.message import Message
from paramiko.pkey import PKey, UnknownKeyType
from paramiko.util import asbytes, get_logger
cSSH2_AGENTC_REQUEST_IDENTITIES = byte_chr(11)
SSH2_AGENT_IDENTITIES_ANSWER = 12
cSSH2_AGENTC_SIGN_REQUEST = byte_chr(13)
SSH2_AGENT_SIGN_RESPONSE = 14
SSH_AGENT_RSA_SHA2_256 = 2
SSH_AGENT_RSA_SHA2_512 = 4
# NOTE: RFC mildly confusing; while these flags are OR'd together, OpenSSH at
# least really treats them like "AND"s, in the sense that if it finds the
# SHA256 flag set it won't continue looking at the SHA512 one; it
# short-circuits right away.
# Thus, we never want to eg submit 6 to say "either's good".
ALGORITHM_FLAG_MAP = {
"rsa-sha2-256": SSH_AGENT_RSA_SHA2_256,
"rsa-sha2-512": SSH_AGENT_RSA_SHA2_512,
}
for key, value in list(ALGORITHM_FLAG_MAP.items()):
ALGORITHM_FLAG_MAP[f"{key}-cert-v01@openssh.com"] = value
# TODO 4.0: rename all these - including making some of their methods public?
class AgentSSH:
def __init__(self):
self._conn = None
self._keys = ()
def get_keys(self):
"""
Return the list of keys available through the SSH agent, if any. If
no SSH agent was running (or it couldn't be contacted), an empty list
will be returned.
This method performs no IO, just returns the list of keys retrieved
when the connection was made.
:return:
a tuple of `.AgentKey` objects representing keys available on the
SSH agent
"""
return self._keys
def _connect(self, conn):
self._conn = conn
ptype, result = self._send_message(cSSH2_AGENTC_REQUEST_IDENTITIES)
if ptype != SSH2_AGENT_IDENTITIES_ANSWER:
raise SSHException("could not get keys from ssh-agent")
keys = []
for i in range(result.get_int()):
keys.append(
AgentKey(
agent=self,
blob=result.get_binary(),
comment=result.get_text(),
)
)
self._keys = tuple(keys)
def _close(self):
if self._conn is not None:
self._conn.close()
self._conn = None
self._keys = ()
def _send_message(self, msg):
msg = asbytes(msg)
self._conn.send(struct.pack(">I", len(msg)) + msg)
data = self._read_all(4)
msg = Message(self._read_all(struct.unpack(">I", data)[0]))
return ord(msg.get_byte()), msg
def _read_all(self, wanted):
result = self._conn.recv(wanted)
while len(result) < wanted:
if len(result) == 0:
raise SSHException("lost ssh-agent")
extra = self._conn.recv(wanted - len(result))
if len(extra) == 0:
raise SSHException("lost ssh-agent")
result += extra
return result
class AgentProxyThread(threading.Thread):
"""
Class in charge of communication between two channels.
"""
def __init__(self, agent):
threading.Thread.__init__(self, target=self.run)
self._agent = agent
self._exit = False
def run(self):
try:
(r, addr) = self.get_connection()
# Found that r should be either
# a socket from the socket library or None
self.__inr = r
# The address should be an IP address as a string? or None
self.__addr = addr
self._agent.connect()
if not isinstance(self._agent, int) and (
self._agent._conn is None
or not hasattr(self._agent._conn, "fileno")
):
raise AuthenticationException("Unable to connect to SSH agent")
self._communicate()
except:
# XXX Not sure what to do here ... raise or pass ?
raise
def _communicate(self):
import fcntl
oldflags = fcntl.fcntl(self.__inr, fcntl.F_GETFL)
fcntl.fcntl(self.__inr, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
while not self._exit:
events = select([self._agent._conn, self.__inr], [], [], 0.5)
for fd in events[0]:
if self._agent._conn == fd:
data = self._agent._conn.recv(512)
if len(data) != 0:
self.__inr.send(data)
else:
self._close()
break
elif self.__inr == fd:
data = self.__inr.recv(512)
if len(data) != 0:
self._agent._conn.send(data)
else:
self._close()
break
time.sleep(io_sleep)
def _close(self):
self._exit = True
self.__inr.close()
self._agent._conn.close()
class AgentLocalProxy(AgentProxyThread):
"""
Class used when a remote fake agent asks to be proxied to the local
SSH agent (e.g. over a Unix domain socket).
"""
def __init__(self, agent):
AgentProxyThread.__init__(self, agent)
def get_connection(self):
"""
Return a pair of socket object and string address.
May block!
"""
conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
conn.bind(self._agent._get_filename())
conn.listen(1)
(r, addr) = conn.accept()
return r, addr
except:
raise
class AgentRemoteProxy(AgentProxyThread):
"""
Class to be used when wanting to ask a remote SSH Agent
"""
def __init__(self, agent, chan):
AgentProxyThread.__init__(self, agent)
self.__chan = chan
def get_connection(self):
return self.__chan, None
def get_agent_connection():
"""
Returns some SSH agent object, or None if none were found/supported.
.. versionadded:: 2.10
"""
if ("SSH_AUTH_SOCK" in os.environ) and (sys.platform != "win32"):
conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
conn.connect(os.environ["SSH_AUTH_SOCK"])
return conn
except:
# probably a dangling env var: the ssh agent is gone
return
elif sys.platform == "win32":
from . import win_pageant, win_openssh
conn = None
if win_pageant.can_talk_to_agent():
conn = win_pageant.PageantConnection()
elif win_openssh.can_talk_to_agent():
conn = win_openssh.OpenSSHAgentConnection()
return conn
else:
# no agent support
return
class AgentClientProxy:
"""
Class proxying requests as a client:
#. the client asks for request_forward_agent()
#. the server creates a proxy and a fake SSH agent
#. the server asks to establish a connection when needed, calling
the forward_agent_handler on the client side
#. the forward_agent_handler launches a thread connecting the
remote fake agent to the local agent
#. communication occurs ...
"""
def __init__(self, chanRemote):
self._conn = None
self.__chanR = chanRemote
self.thread = AgentRemoteProxy(self, chanRemote)
self.thread.start()
def __del__(self):
self.close()
def connect(self):
"""
Method automatically called by ``AgentProxyThread.run``.
"""
conn = get_agent_connection()
if not conn:
return
self._conn = conn
def close(self):
"""
Close the current connection and terminate the agent.
Should be called manually.
"""
if hasattr(self, "thread"):
self.thread._exit = True
self.thread.join(1000)
if self._conn is not None:
self._conn.close()
class AgentServerProxy(AgentSSH):
"""
Allows an SSH server to access a forwarded agent.
This also creates a unix domain socket on the system to allow external
programs to also access the agent. For this reason, you probably only want
to create one of these.
:meth:`connect` must be called before it is usable. This will also load the
list of keys the agent contains. You must also call :meth:`close` in
order to clean up the unix socket and the thread that maintains it.
(:class:`contextlib.closing` might be helpful to you.)
:param .Transport t: Transport used for SSH Agent communication forwarding
:raises: `.SSHException` -- mostly if we lost the agent
"""
def __init__(self, t):
AgentSSH.__init__(self)
self.__t = t
self._dir = tempfile.mkdtemp("sshproxy")
os.chmod(self._dir, stat.S_IRWXU)
self._file = self._dir + "/sshproxy.ssh"
self.thread = AgentLocalProxy(self)
self.thread.start()
def __del__(self):
self.close()
def connect(self):
conn_sock = self.__t.open_forward_agent_channel()
if conn_sock is None:
raise SSHException("lost ssh-agent")
conn_sock.set_name("auth-agent")
self._connect(conn_sock)
def close(self):
"""
Terminate the agent, clean the files, close connections.
Should be called manually.
"""
os.remove(self._file)
os.rmdir(self._dir)
self.thread._exit = True
self.thread.join(1000)
self._close()
def get_env(self):
"""
Helper for the environment under Unix.
:return:
a dict containing the ``SSH_AUTH_SOCK`` environment variable
"""
return {"SSH_AUTH_SOCK": self._get_filename()}
def _get_filename(self):
return self._file
class AgentRequestHandler:
"""
Primary/default implementation of SSH agent forwarding functionality.
Simply instantiate this class, handing it a live command-executing session
object, and it will handle forwarding any local SSH agent processes it
finds.
For example::
# Connect
client = SSHClient()
client.connect(host, port, username)
# Obtain session
session = client.get_transport().open_session()
# Forward local agent
AgentRequestHandler(session)
# Commands executed after this point will see the forwarded agent on
# the remote end.
session.exec_command("git clone https://my.git.repository/")
"""
def __init__(self, chanClient):
self._conn = None
self.__chanC = chanClient
chanClient.request_forward_agent(self._forward_agent_handler)
self.__clientProxys = []
def _forward_agent_handler(self, chanRemote):
self.__clientProxys.append(AgentClientProxy(chanRemote))
def __del__(self):
self.close()
def close(self):
for p in self.__clientProxys:
p.close()
class Agent(AgentSSH):
"""
Client interface for using private keys from an SSH agent running on the
local machine. If an SSH agent is running, this class can be used to
connect to it and retrieve `.PKey` objects which can be used when
attempting to authenticate to remote SSH servers.
Upon initialization, a session with the local machine's SSH agent is
opened, if one is running. If no agent is running, initialization will
succeed, but `get_keys` will return an empty tuple.
:raises: `.SSHException` --
if an SSH agent is found, but speaks an incompatible protocol
.. versionchanged:: 2.10
Added support for native openssh agent on windows (extending previous
putty pageant support)
"""
def __init__(self):
AgentSSH.__init__(self)
conn = get_agent_connection()
if not conn:
return
self._connect(conn)
def close(self):
"""
Close the SSH agent connection.
"""
self._close()
class AgentKey(PKey):
"""
Private key held in a local SSH agent. This type of key can be used for
authenticating to a remote server (signing). Most other key operations
work as expected.
.. versionchanged:: 3.2
Added the ``comment`` kwarg and attribute.
.. versionchanged:: 3.2
Added the ``.inner_key`` attribute holding a reference to the 'real'
key instance this key is a proxy for, if one was obtainable, else None.
"""
def __init__(self, agent, blob, comment=""):
self.agent = agent
self.blob = blob
self.comment = comment
msg = Message(blob)
self.name = msg.get_text()
self._logger = get_logger(__file__)
self.inner_key = None
try:
self.inner_key = PKey.from_type_string(
key_type=self.name, key_bytes=blob
)
except UnknownKeyType:
# Log, but don't explode, since inner_key is a best-effort thing.
err = "Unable to derive inner_key for agent key of type {!r}"
self.log(DEBUG, err.format(self.name))
def log(self, *args, **kwargs):
return self._logger.log(*args, **kwargs)
def asbytes(self):
# Prefer inner_key.asbytes, since that will differ for eg RSA-CERT
return self.inner_key.asbytes() if self.inner_key else self.blob
def get_name(self):
return self.name
def get_bits(self):
# Have to work around PKey's default get_bits being crap
if self.inner_key is not None:
return self.inner_key.get_bits()
return super().get_bits()
def __getattr__(self, name):
"""
Proxy any un-implemented methods/properties to the inner_key.
"""
if self.inner_key is None: # nothing to proxy to
raise AttributeError(name)
return getattr(self.inner_key, name)
@property
def _fields(self):
fallback = [self.get_name(), self.blob]
return self.inner_key._fields if self.inner_key else fallback
def sign_ssh_data(self, data, algorithm=None):
msg = Message()
msg.add_byte(cSSH2_AGENTC_SIGN_REQUEST)
# NOTE: this used to be just self.blob, which is not entirely right for
# RSA-CERT 'keys' - those end up always degrading to ssh-rsa type
# signatures, for reasons probably internal to OpenSSH's agent code,
# even if everything else wants SHA2 (including our flag map).
msg.add_string(self.asbytes())
msg.add_string(data)
msg.add_int(ALGORITHM_FLAG_MAP.get(algorithm, 0))
ptype, result = self.agent._send_message(msg)
if ptype != SSH2_AGENT_SIGN_RESPONSE:
raise SSHException("key cannot be used for signing")
return result.get_binary()
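A short, hedged sketch of the client-side entry point above: listing whatever keys a running agent exposes (an unreachable agent simply yields an empty tuple):

from paramiko.agent import Agent

agent = Agent()
try:
    for key in agent.get_keys():
        print(key.get_name(), key.get_bits(), key.comment)
finally:
    agent.close()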

File diff suppressed because it is too large


@@ -0,0 +1,306 @@
"""
Modern, adaptable authentication machinery.
Replaces certain parts of `.SSHClient`. For a concrete implementation, see the
``OpenSSHAuthStrategy`` class in `Fabric <https://fabfile.org>`_.
"""
from collections import namedtuple
from .agent import AgentKey
from .util import get_logger
from .ssh_exception import AuthenticationException
class AuthSource:
"""
Some SSH authentication source, such as a password, private key, or agent.
See subclasses in this module for concrete implementations.
All implementations must accept at least a ``username`` (``str``) kwarg.
"""
def __init__(self, username):
self.username = username
def _repr(self, **kwargs):
# TODO: are there any good libs for this? maybe some helper from
# structlog?
pairs = [f"{k}={v!r}" for k, v in kwargs.items()]
joined = ", ".join(pairs)
return f"{self.__class__.__name__}({joined})"
def __repr__(self):
return self._repr()
def authenticate(self, transport):
"""
Perform authentication.
"""
raise NotImplementedError
class NoneAuth(AuthSource):
"""
Auth type "none", ie https://www.rfc-editor.org/rfc/rfc4252#section-5.2 .
"""
def authenticate(self, transport):
return transport.auth_none(self.username)
class Password(AuthSource):
"""
Password authentication.
:param callable password_getter:
A lazy callable that should return a `str` password value at
authentication time, such as a `functools.partial` wrapping
`getpass.getpass`, an API call to a secrets store, or similar.
If you already know the password at instantiation time, you should
simply use something like ``lambda: "my literal"`` (for a literal, but
also, shame on you!) or ``lambda: variable_name`` (for something stored
in a variable).
"""
def __init__(self, username, password_getter):
super().__init__(username=username)
self.password_getter = password_getter
def __repr__(self):
# Password auth is marginally more 'username-caring' than pkeys, so may
# as well log that info here.
return super()._repr(user=self.username)
def authenticate(self, transport):
# Lazily get the password, in case it's prompting a user
# TODO: be nice to log source _of_ the password?
password = self.password_getter()
return transport.auth_password(self.username, password)
# TODO 4.0: twiddle this, or PKey, or both, so they're more obviously distinct.
# TODO 4.0: the obvious is to make this more wordy (PrivateKeyAuth), the
# minimalist approach might be to rename PKey to just Key (esp given all the
# subclasses are WhateverKey and not WhateverPKey)
class PrivateKey(AuthSource):
"""
Essentially a mixin for private keys.
Knows how to auth, but leaves key material discovery/loading/decryption to
subclasses.
Subclasses **must** ensure that they've set ``self.pkey`` to a decrypted
`.PKey` instance before calling ``super().authenticate``; typically
either in their ``__init__``, or in an overridden ``authenticate`` prior to
its `super` call.
"""
def authenticate(self, transport):
return transport.auth_publickey(self.username, self.pkey)
class InMemoryPrivateKey(PrivateKey):
"""
An in-memory, decrypted `.PKey` object.
"""
def __init__(self, username, pkey):
super().__init__(username=username)
# No decryption (presumably) necessary!
self.pkey = pkey
def __repr__(self):
# NOTE: most of interesting repr-bits for private keys is in PKey.
# TODO: tacking on agent-ness like this is a bit awkward, but, eh?
rep = super()._repr(pkey=self.pkey)
if isinstance(self.pkey, AgentKey):
rep += " [agent]"
return rep
class OnDiskPrivateKey(PrivateKey):
"""
Some on-disk private key that needs opening and possibly decrypting.
:param str source:
String tracking where this key's path was specified; should be one of
``"ssh-config"``, ``"python-config"``, or ``"implicit-home"``.
:param Path path:
The filesystem path this key was loaded from.
:param PKey pkey:
The `PKey` object this auth source uses/represents.
"""
def __init__(self, username, source, path, pkey):
super().__init__(username=username)
self.source = source
allowed = ("ssh-config", "python-config", "implicit-home")
if source not in allowed:
raise ValueError(f"source argument must be one of: {allowed!r}")
self.path = path
# Superclass wants .pkey, other two are mostly for display/debugging.
self.pkey = pkey
def __repr__(self):
return self._repr(
key=self.pkey, source=self.source, path=str(self.path)
)
# TODO re sources: is there anything in an OpenSSH config file that doesn't fit
# into what Paramiko already had kwargs for?
SourceResult = namedtuple("SourceResult", ["source", "result"])
# TODO: tempting to make this an OrderedDict, except the keys essentially want
# to be rich objects (AuthSources) which do not make for useful user indexing?
# TODO: members being vanilla tuples is pretty old-school/expedient; they
# "really" want to be something that's type friendlier (unless the tuple's 2nd
# member being a Union of two types is "fine"?), which I assume means yet more
# classes, eg an abstract SourceResult with concrete AuthSuccess and
# AuthFailure children?
# TODO: arguably we want __init__ typechecking of the members (or to leverage
# mypy by classifying this literally as list-of-AuthSource?)
class AuthResult(list):
"""
Represents a partial or complete SSH authentication attempt.
This class conceptually extends `AuthStrategy` by pairing the former's
authentication **sources** with the **results** of trying to authenticate
with them.
`AuthResult` is a (subclass of) `list` of `namedtuple`, which are of the
form ``namedtuple('SourceResult', 'source', 'result')`` (where the
``source`` member is an `AuthSource` and the ``result`` member is either a
return value from the relevant `.Transport` method, or an exception
object).
.. note::
Transport auth method results are always themselves a ``list`` of "next
allowable authentication methods".
In the simple case of "you just authenticated successfully", it's an
empty list; if your auth was rejected but you're allowed to try again,
it will be a list of string method names like ``pubkey`` or
``password``.
The ``__str__`` of this class represents the empty-list scenario as the
word ``success``, which should make reading the result of an
authentication session more obvious to humans.
Instances also have a `strategy` attribute referencing the `AuthStrategy`
which was attempted.
"""
def __init__(self, strategy, *args, **kwargs):
self.strategy = strategy
super().__init__(*args, **kwargs)
def __str__(self):
# NOTE: meaningfully distinct from __repr__, which still wants to use
# superclass' implementation.
# TODO: go hog wild, use rich.Table? how is that on degraded terms?
# TODO: test this lol
return "\n".join(
f"{x.source} -> {x.result or 'success'}" for x in self
)
# TODO 4.0: descend from SSHException or even just Exception
class AuthFailure(AuthenticationException):
"""
Basic exception wrapping an `AuthResult` indicating overall auth failure.
Note that `AuthFailure` descends from `AuthenticationException` but is
generally "higher level"; the latter is now only raised by individual
`AuthSource` attempts and should typically only be seen by users when
encapsulated in this class. It subclasses `AuthenticationException`
primarily for backwards compatibility reasons.
"""
def __init__(self, result):
self.result = result
def __str__(self):
return "\n" + str(self.result)
class AuthStrategy:
"""
This class represents one or more attempts to auth with an SSH server.
By default, subclasses must at least accept an ``ssh_config``
(`.SSHConfig`) keyword argument, but may opt to accept more as needed for
their particular strategy.
"""
def __init__(
self,
ssh_config,
):
self.ssh_config = ssh_config
self.log = get_logger(__name__)
def get_sources(self):
"""
Generator yielding `AuthSource` instances, in the order to try.
This is the primary override point for subclasses: you figure out what
sources you need, and ``yield`` them.
Subclasses _of_ subclasses may find themselves wanting to do things
like filtering or discarding around a call to `super`.
"""
raise NotImplementedError
def authenticate(self, transport):
"""
Handles attempting `AuthSource` instances yielded from `get_sources`.
You *normally* won't need to override this, but it's an option for
advanced users.
"""
succeeded = False
overall_result = AuthResult(strategy=self)
# TODO: arguably we could fit in a "send none auth, record allowed auth
# types sent back" thing here as OpenSSH-client does, but that likely
# wants to live in fabric.OpenSSHAuthStrategy as not all target servers
# will implement it!
# TODO: needs better "server told us too many attempts" checking!
for source in self.get_sources():
self.log.debug(f"Trying {source}")
try: # NOTE: this really wants to _only_ wrap the authenticate()!
result = source.authenticate(transport)
succeeded = True
# TODO: 'except PartialAuthentication' is needed for 2FA and
# similar, as per old SSHClient.connect - it is the only way
# AuthHandler supplies access to the 'name-list' field from
# MSG_USERAUTH_FAILURE, at present.
except Exception as e:
result = e
# TODO: look at what this could possibly raise, we don't really
# want Exception here, right? just SSHException subclasses? or
# do we truly want to capture anything at all with assumption
# it's easy enough for users to look afterwards?
# NOTE: showing type, not message, for tersity & also most of
# the time it's basically just "Authentication failed."
source_class = e.__class__.__name__
self.log.info(
f"Authentication via {source} failed with {source_class}"
)
overall_result.append(SourceResult(source, result))
if succeeded:
break
# Gotta die here if nothing worked, otherwise Transport's main loop
# just kinda hangs out until something times out!
if not succeeded:
raise AuthFailure(result=overall_result)
# Success: give back what was done, in case they care.
return overall_result
# TODO: is there anything OpenSSH client does which _can't_ cleanly map to
# iterating a generator?
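To make the subclassing contract above concrete, here is a hedged sketch of a hypothetical strategy (the class name and source ordering are illustrative, not part of this changeset); it would typically be handed to ``SSHClient.connect(..., auth_strategy=...)``:

import getpass
from functools import partial

from paramiko.auth_strategy import AuthStrategy, InMemoryPrivateKey, Password


class PasswordThenKey(AuthStrategy):
    """Hypothetical: try a lazily-prompted password, then an in-memory key."""

    def __init__(self, ssh_config, username, pkey=None):
        super().__init__(ssh_config=ssh_config)
        self.username = username
        self.pkey = pkey

    def get_sources(self):
        # Password is fetched only when actually attempted.
        yield Password(
            self.username,
            password_getter=partial(getpass.getpass, "SSH password: "),
        )
        if self.pkey is not None:
            yield InMemoryPrivateKey(self.username, pkey=self.pkey)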


@@ -0,0 +1,139 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from paramiko.common import max_byte, zero_byte, byte_ord, byte_chr
import paramiko.util as util
from paramiko.util import b
from paramiko.sftp import int64
class BERException(Exception):
pass
class BER:
"""
Robey's tiny little attempt at a BER decoder.
"""
def __init__(self, content=bytes()):
self.content = b(content)
self.idx = 0
def asbytes(self):
return self.content
def __str__(self):
return self.asbytes()
def __repr__(self):
return "BER('" + repr(self.content) + "')"
def decode(self):
return self.decode_next()
def decode_next(self):
if self.idx >= len(self.content):
return None
ident = byte_ord(self.content[self.idx])
self.idx += 1
if (ident & 31) == 31:
# identifier > 30
ident = 0
while self.idx < len(self.content):
t = byte_ord(self.content[self.idx])
self.idx += 1
ident = (ident << 7) | (t & 0x7F)
if not (t & 0x80):
break
if self.idx >= len(self.content):
return None
# now fetch length
size = byte_ord(self.content[self.idx])
self.idx += 1
if size & 0x80:
# more complicated...
# FIXME: theoretically should handle indefinite-length (0x80)
t = size & 0x7F
if self.idx + t > len(self.content):
return None
size = util.inflate_long(
self.content[self.idx : self.idx + t], True
)
self.idx += t
if self.idx + size > len(self.content):
# can't fit
return None
data = self.content[self.idx : self.idx + size]
self.idx += size
# now switch on id
if ident == 0x30:
# sequence
return self.decode_sequence(data)
elif ident == 2:
# int
return util.inflate_long(data)
else:
# 1: boolean (00 false, otherwise true)
msg = "Unknown ber encoding type {:d} (robey is lazy)"
raise BERException(msg.format(ident))
@staticmethod
def decode_sequence(data):
out = []
ber = BER(data)
while True:
x = ber.decode_next()
if x is None:
break
out.append(x)
return out
def encode_tlv(self, ident, val):
# no need to support ident > 31 here
self.content += byte_chr(ident)
if len(val) > 0x7F:
lenstr = util.deflate_long(len(val))
self.content += byte_chr(0x80 + len(lenstr)) + lenstr
else:
self.content += byte_chr(len(val))
self.content += val
def encode(self, x):
if type(x) is bool:
if x:
self.encode_tlv(1, max_byte)
else:
self.encode_tlv(1, zero_byte)
elif (type(x) is int) or (type(x) is int64):
self.encode_tlv(2, util.deflate_long(x))
elif type(x) is str:
self.encode_tlv(4, x)
elif (type(x) is list) or (type(x) is tuple):
self.encode_tlv(0x30, self.encode_sequence(x))
else:
raise BERException(
"Unknown type for encoding: {!r}".format(type(x))
)
@staticmethod
def encode_sequence(data):
ber = BER()
for item in data:
ber.encode(item)
return ber.asbytes()
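The decoder above only understands INTEGER and SEQUENCE on the way back in, so a round trip is easiest to show with nested integer lists (a hedged sketch, not upstream test code):

from paramiko.ber import BER

ber = BER()
ber.encode([1, 2, [3, 4]])
print(BER(ber.asbytes()).decode())  # [1, 2, [3, 4]]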


@@ -0,0 +1,212 @@
# Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Attempt to generalize the "feeder" part of a `.Channel`: an object which can be
read from and closed, but is reading from a buffer fed by another thread. The
read operations are blocking and can have a timeout set.
"""
import array
import threading
import time
from paramiko.util import b
class PipeTimeout(IOError):
"""
Indicates that a timeout was reached on a read from a `.BufferedPipe`.
"""
pass
class BufferedPipe:
"""
A buffer that obeys normal read (with timeout) & close semantics for a
file or socket, but is fed data from another thread. This is used by
`.Channel`.
"""
def __init__(self):
self._lock = threading.Lock()
self._cv = threading.Condition(self._lock)
self._event = None
self._buffer = array.array("B")
self._closed = False
def _buffer_frombytes(self, data):
self._buffer.frombytes(data)
def _buffer_tobytes(self, limit=None):
return self._buffer[:limit].tobytes()
def set_event(self, event):
"""
Set an event on this buffer. When data is ready to be read (or the
buffer has been closed), the event will be set. When no data is
ready, the event will be cleared.
:param threading.Event event: the event to set/clear
"""
self._lock.acquire()
try:
self._event = event
# Make sure the event starts in `set` state if we appear to already
# be closed; otherwise, if we start in `clear` state & are closed,
# nothing will ever call `.feed` and the event (& OS pipe, if we're
# wrapping one - see `Channel.fileno`) will permanently stay in
# `clear`, causing deadlock if e.g. `select`ed upon.
if self._closed or len(self._buffer) > 0:
event.set()
else:
event.clear()
finally:
self._lock.release()
def feed(self, data):
"""
Feed new data into this pipe. This method is assumed to be called
from a separate thread, so synchronization is done.
:param data: the data to add, as a ``str`` or ``bytes``
"""
self._lock.acquire()
try:
if self._event is not None:
self._event.set()
self._buffer_frombytes(b(data))
self._cv.notify_all()
finally:
self._lock.release()
def read_ready(self):
"""
Returns true if data is buffered and ready to be read from this
feeder. A ``False`` result does not mean that the feeder has closed;
it means you may need to wait before more data arrives.
:return:
``True`` if a `read` call would immediately return at least one
byte; ``False`` otherwise.
"""
self._lock.acquire()
try:
if len(self._buffer) == 0:
return False
return True
finally:
self._lock.release()
def read(self, nbytes, timeout=None):
"""
Read data from the pipe. The return value is a string representing
the data received. The maximum amount of data to be received at once
is specified by ``nbytes``. If a string of length zero is returned,
the pipe has been closed.
The optional ``timeout`` argument can be a nonnegative float expressing
seconds, or ``None`` for no timeout. If a float is given, a
`.PipeTimeout` will be raised if the timeout period value has elapsed
before any data arrives.
:param int nbytes: maximum number of bytes to read
:param float timeout:
maximum seconds to wait (or ``None``, the default, to wait forever)
:return: the read data, as a ``str`` or ``bytes``
:raises:
`.PipeTimeout` -- if a timeout was specified and no data was ready
before that timeout
"""
out = bytes()
self._lock.acquire()
try:
if len(self._buffer) == 0:
if self._closed:
return out
# should we block?
if timeout == 0.0:
raise PipeTimeout()
# loop here in case we get woken up but a different thread has
# grabbed everything in the buffer.
while (len(self._buffer) == 0) and not self._closed:
then = time.time()
self._cv.wait(timeout)
if timeout is not None:
timeout -= time.time() - then
if timeout <= 0.0:
raise PipeTimeout()
# something's in the buffer and we have the lock!
if len(self._buffer) <= nbytes:
out = self._buffer_tobytes()
del self._buffer[:]
if (self._event is not None) and not self._closed:
self._event.clear()
else:
out = self._buffer_tobytes(nbytes)
del self._buffer[:nbytes]
finally:
self._lock.release()
return out
def empty(self):
"""
Clear out the buffer and return all data that was in it.
:return:
any data that was in the buffer prior to clearing it out, as a
`str`
"""
self._lock.acquire()
try:
out = self._buffer_tobytes()
del self._buffer[:]
if (self._event is not None) and not self._closed:
self._event.clear()
return out
finally:
self._lock.release()
def close(self):
"""
Close this pipe object. Future calls to `read` after the buffer
has been emptied will return immediately with an empty string.
"""
self._lock.acquire()
try:
self._closed = True
self._cv.notify_all()
if self._event is not None:
self._event.set()
finally:
self._lock.release()
def __len__(self):
"""
Return the number of bytes buffered.
:return: number (`int`) of bytes buffered
"""
self._lock.acquire()
try:
return len(self._buffer)
finally:
self._lock.release()
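A hedged sketch of the intended threading pattern, assuming the module path ``paramiko.buffered_pipe`` (which this diff does not name): one thread feeds, another reads with a timeout until the pipe is closed and drained:

import threading

from paramiko.buffered_pipe import BufferedPipe, PipeTimeout

pipe = BufferedPipe()


def producer():
    pipe.feed(b"hello ")
    pipe.feed(b"world")
    pipe.close()


threading.Thread(target=producer).start()

chunks = []
while True:
    try:
        data = pipe.read(32, timeout=1.0)
    except PipeTimeout:
        continue  # nothing arrived within a second; keep waiting
    if not data:  # empty result: closed and fully drained
        break
    chunks.append(data)
print(b"".join(chunks))  # b'hello world'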

File diff suppressed because it is too large


@@ -0,0 +1,893 @@
# Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
SSH client & key policies
"""
from binascii import hexlify
import getpass
import inspect
import os
import socket
import warnings
from errno import ECONNREFUSED, EHOSTUNREACH
from paramiko.agent import Agent
from paramiko.common import DEBUG
from paramiko.config import SSH_PORT
from paramiko.dsskey import DSSKey
from paramiko.ecdsakey import ECDSAKey
from paramiko.ed25519key import Ed25519Key
from paramiko.hostkeys import HostKeys
from paramiko.rsakey import RSAKey
from paramiko.ssh_exception import (
SSHException,
BadHostKeyException,
NoValidConnectionsError,
)
from paramiko.transport import Transport
from paramiko.util import ClosingContextManager
class SSHClient(ClosingContextManager):
"""
A high-level representation of a session with an SSH server. This class
wraps `.Transport`, `.Channel`, and `.SFTPClient` to take care of most
aspects of authenticating and opening channels. A typical use case is::
client = SSHClient()
client.load_system_host_keys()
client.connect('ssh.example.com')
stdin, stdout, stderr = client.exec_command('ls -l')
You may pass in explicit overrides for authentication and server host key
checking. The default mechanism is to try to use local key files or an
SSH agent (if one is running).
Instances of this class may be used as context managers.
.. versionadded:: 1.6
"""
def __init__(self):
"""
Create a new SSHClient.
"""
self._system_host_keys = HostKeys()
self._host_keys = HostKeys()
self._host_keys_filename = None
self._log_channel = None
self._policy = RejectPolicy()
self._transport = None
self._agent = None
def load_system_host_keys(self, filename=None):
"""
Load host keys from a system (read-only) file. Host keys read with
this method will not be saved back by `save_host_keys`.
This method can be called multiple times. Each new set of host keys
will be merged with the existing set (new replacing old if there are
conflicts).
If ``filename`` is left as ``None``, an attempt will be made to read
keys from the user's local "known hosts" file, as used by OpenSSH,
and no exception will be raised if the file can't be read. This is
probably only useful on posix.
:param str filename: the filename to read, or ``None``
:raises: ``IOError`` --
if a filename was provided and the file could not be read
"""
if filename is None:
# try the user's .ssh key file, and mask exceptions
filename = os.path.expanduser("~/.ssh/known_hosts")
try:
self._system_host_keys.load(filename)
except IOError:
pass
return
self._system_host_keys.load(filename)
def load_host_keys(self, filename):
"""
Load host keys from a local host-key file. Host keys read with this
method will be checked after keys loaded via `load_system_host_keys`,
but will be saved back by `save_host_keys` (so they can be modified).
The missing host key policy `.AutoAddPolicy` adds keys to this set and
saves them, when connecting to a previously-unknown server.
This method can be called multiple times. Each new set of host keys
will be merged with the existing set (new replacing old if there are
conflicts). When automatically saving, the last hostname is used.
:param str filename: the filename to read
:raises: ``IOError`` -- if the filename could not be read
"""
self._host_keys_filename = filename
self._host_keys.load(filename)
def save_host_keys(self, filename):
"""
Save the host keys back to a file. Only the host keys loaded with
`load_host_keys` (plus any added directly) will be saved -- not any
host keys loaded with `load_system_host_keys`.
:param str filename: the filename to save to
:raises: ``IOError`` -- if the file could not be written
"""
# update local host keys from file (in case other SSH clients
# have written to the known_hosts file meanwhile).
if self._host_keys_filename is not None:
self.load_host_keys(self._host_keys_filename)
with open(filename, "w") as f:
for hostname, keys in self._host_keys.items():
for keytype, key in keys.items():
f.write(
"{} {} {}\n".format(
hostname, keytype, key.get_base64()
)
)
def get_host_keys(self):
"""
Get the local `.HostKeys` object. This can be used to examine the
local host keys or change them.
:return: the local host keys as a `.HostKeys` object.
"""
return self._host_keys
def set_log_channel(self, name):
"""
Set the channel for logging. The default is ``"paramiko.transport"``
but it can be set to anything you want.
:param str name: new channel name for logging
"""
self._log_channel = name
def set_missing_host_key_policy(self, policy):
"""
Set policy to use when connecting to servers without a known host key.
Specifically:
* A **policy** is a "policy class" (or instance thereof), namely some
subclass of `.MissingHostKeyPolicy` such as `.RejectPolicy` (the
default), `.AutoAddPolicy`, `.WarningPolicy`, or a user-created
subclass.
* A host key is **known** when it appears in the client object's cached
host keys structures (those manipulated by `load_system_host_keys`
and/or `load_host_keys`).
:param .MissingHostKeyPolicy policy:
the policy to use when receiving a host key from a
previously-unknown server
"""
if inspect.isclass(policy):
policy = policy()
self._policy = policy
def _families_and_addresses(self, hostname, port):
"""
Yield pairs of address families and addresses to try for connecting.
:param str hostname: the server to connect to
:param int port: the server port to connect to
:returns: Yields an iterable of ``(family, address)`` tuples
"""
guess = True
addrinfos = socket.getaddrinfo(
hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM
)
for (family, socktype, proto, canonname, sockaddr) in addrinfos:
if socktype == socket.SOCK_STREAM:
yield family, sockaddr
guess = False
# some OS like AIX don't indicate SOCK_STREAM support, so just
# guess. :( We only do this if we did not get a single result marked
# as socktype == SOCK_STREAM.
if guess:
for family, _, _, _, sockaddr in addrinfos:
yield family, sockaddr
def connect(
self,
hostname,
port=SSH_PORT,
username=None,
password=None,
pkey=None,
key_filename=None,
timeout=None,
allow_agent=True,
look_for_keys=True,
compress=False,
sock=None,
gss_auth=False,
gss_kex=False,
gss_deleg_creds=True,
gss_host=None,
banner_timeout=None,
auth_timeout=None,
channel_timeout=None,
gss_trust_dns=True,
passphrase=None,
disabled_algorithms=None,
transport_factory=None,
auth_strategy=None,
):
"""
Connect to an SSH server and authenticate to it. The server's host key
is checked against the system host keys (see `load_system_host_keys`)
and any local host keys (`load_host_keys`). If the server's hostname
is not found in either set of host keys, the missing host key policy
is used (see `set_missing_host_key_policy`). The default policy is
to reject the key and raise an `.SSHException`.
Authentication is attempted in the following order of priority:
- The ``pkey`` or ``key_filename`` passed in (if any)
- ``key_filename`` may contain OpenSSH public certificate paths
as well as regular private-key paths; when files ending in
``-cert.pub`` are found, they are assumed to match a private
key, and both components will be loaded. (The private key
itself does *not* need to be listed in ``key_filename`` for
this to occur - *just* the certificate.)
- Any key we can find through an SSH agent
- Any "id_rsa", "id_dsa" or "id_ecdsa" key discoverable in
``~/.ssh/``
- When OpenSSH-style public certificates exist that match an
existing such private key (so e.g. one has ``id_rsa`` and
``id_rsa-cert.pub``) the certificate will be loaded alongside
the private key and used for authentication.
- Plain username/password auth, if a password was given
If a private key requires a password to unlock it, and a password is
passed in, that password will be used to attempt to unlock the key.
:param str hostname: the server to connect to
:param int port: the server port to connect to
:param str username:
the username to authenticate as (defaults to the current local
username)
:param str password:
Used for password authentication; is also used for private key
decryption if ``passphrase`` is not given.
:param str passphrase:
Used for decrypting private keys.
:param .PKey pkey: an optional private key to use for authentication
:param str key_filename:
the filename, or list of filenames, of optional private key(s)
and/or certs to try for authentication
:param float timeout:
an optional timeout (in seconds) for the TCP connect
:param bool allow_agent:
set to False to disable connecting to the SSH agent
:param bool look_for_keys:
set to False to disable searching for discoverable private key
files in ``~/.ssh/``
:param bool compress: set to True to turn on compression
:param socket sock:
an open socket or socket-like object (such as a `.Channel`) to use
for communication to the target host
:param bool gss_auth:
``True`` if you want to use GSS-API authentication
:param bool gss_kex:
Perform GSS-API Key Exchange and user authentication
:param bool gss_deleg_creds: Delegate GSS-API client credentials or not
:param str gss_host:
The target's name in the Kerberos database. Default: hostname
:param bool gss_trust_dns:
Indicates whether or not the DNS is trusted to securely
canonicalize the name of the host being connected to (default
``True``).
:param float banner_timeout: an optional timeout (in seconds) to wait
for the SSH banner to be presented.
:param float auth_timeout: an optional timeout (in seconds) to wait for
an authentication response.
:param float channel_timeout: an optional timeout (in seconds) to wait
for a channel open response.
:param dict disabled_algorithms:
an optional dict passed directly to `.Transport` and its keyword
argument of the same name.
:param transport_factory:
an optional callable which is handed a subset of the constructor
arguments (primarily those related to the socket, GSS
functionality, and algorithm selection) and generates a
`.Transport` instance to be used by this client. Defaults to
`.Transport.__init__`.
:param auth_strategy:
an optional instance of `.AuthStrategy`, triggering use of this
newer authentication mechanism instead of SSHClient's legacy auth
method.
.. warning::
This parameter is **incompatible** with all other
authentication-related parameters (such as, but not limited to,
``password``, ``key_filename`` and ``allow_agent``) and will
trigger an exception if given alongside them.
:returns:
`.AuthResult` if ``auth_strategy`` is non-``None``; otherwise,
returns ``None``.
:raises BadHostKeyException:
if the server's host key could not be verified.
:raises AuthenticationException:
if authentication failed.
:raises UnableToAuthenticate:
if authentication failed (when ``auth_strategy`` is non-``None``;
and note that this is a subclass of ``AuthenticationException``).
:raises socket.error:
if a socket error (other than connection-refused or
host-unreachable) occurred while connecting.
:raises NoValidConnectionsError:
if all valid connection targets for the requested hostname (e.g. IPv4
and IPv6) yielded connection-refused or host-unreachable socket
errors.
:raises SSHException:
if there was any other error connecting or establishing an SSH
session.
.. versionchanged:: 1.15
Added the ``banner_timeout``, ``gss_auth``, ``gss_kex``,
``gss_deleg_creds`` and ``gss_host`` arguments.
.. versionchanged:: 2.3
Added the ``gss_trust_dns`` argument.
.. versionchanged:: 2.4
Added the ``passphrase`` argument.
.. versionchanged:: 2.6
Added the ``disabled_algorithms`` argument.
.. versionchanged:: 2.12
Added the ``transport_factory`` argument.
.. versionchanged:: 3.2
Added the ``auth_strategy`` argument.
"""
if not sock:
errors = {}
# Try multiple possible address families (e.g. IPv4 vs IPv6)
to_try = list(self._families_and_addresses(hostname, port))
for af, addr in to_try:
try:
sock = socket.socket(af, socket.SOCK_STREAM)
if timeout is not None:
try:
sock.settimeout(timeout)
except Exception:
pass
sock.connect(addr)
# Break out of the loop on success
break
except socket.error as e:
# As mentioned in socket docs it is better
# to close sockets explicitly
if sock:
sock.close()
# Raise anything that isn't a straight up connection error
# (such as a resolution error)
if e.errno not in (ECONNREFUSED, EHOSTUNREACH):
raise
# Capture anything else so we know how the run looks once
# iteration is complete. Retain info about which attempt
# this was.
errors[addr] = e
# Make sure we explode usefully if no address family attempts
# succeeded. We've no way of knowing which error is the "right"
# one, so we construct a hybrid exception containing all the real
# ones, of a subclass that client code should still be watching for
# (socket.error)
if len(errors) == len(to_try):
raise NoValidConnectionsError(errors)
if transport_factory is None:
transport_factory = Transport
t = self._transport = transport_factory(
sock,
gss_kex=gss_kex,
gss_deleg_creds=gss_deleg_creds,
disabled_algorithms=disabled_algorithms,
)
t.use_compression(compress=compress)
t.set_gss_host(
# t.hostname may be None, but GSS-API requires a target name.
# Therefore use hostname as fallback.
gss_host=gss_host or hostname,
trust_dns=gss_trust_dns,
gssapi_requested=gss_auth or gss_kex,
)
if self._log_channel is not None:
t.set_log_channel(self._log_channel)
if banner_timeout is not None:
t.banner_timeout = banner_timeout
if auth_timeout is not None:
t.auth_timeout = auth_timeout
if channel_timeout is not None:
t.channel_timeout = channel_timeout
if port == SSH_PORT:
server_hostkey_name = hostname
else:
server_hostkey_name = "[{}]:{}".format(hostname, port)
our_server_keys = self._system_host_keys.get(server_hostkey_name)
if our_server_keys is None:
our_server_keys = self._host_keys.get(server_hostkey_name)
if our_server_keys is not None:
keytype = our_server_keys.keys()[0]
sec_opts = t.get_security_options()
other_types = [x for x in sec_opts.key_types if x != keytype]
sec_opts.key_types = [keytype] + other_types
t.start_client(timeout=timeout)
# If GSS-API Key Exchange is performed we are not required to check the
# host key, because the host is authenticated via GSS-API / SSPI as
# well as our client.
if not self._transport.gss_kex_used:
server_key = t.get_remote_server_key()
if our_server_keys is None:
# will raise exception if the key is rejected
self._policy.missing_host_key(
self, server_hostkey_name, server_key
)
else:
our_key = our_server_keys.get(server_key.get_name())
if our_key != server_key:
if our_key is None:
our_key = list(our_server_keys.values())[0]
raise BadHostKeyException(hostname, server_key, our_key)
if username is None:
username = getpass.getuser()
# New auth flow!
if auth_strategy is not None:
return auth_strategy.authenticate(transport=t)
# Old auth flow!
if key_filename is None:
key_filenames = []
elif isinstance(key_filename, str):
key_filenames = [key_filename]
else:
key_filenames = key_filename
self._auth(
username,
password,
pkey,
key_filenames,
allow_agent,
look_for_keys,
gss_auth,
gss_kex,
gss_deleg_creds,
t.gss_host,
passphrase,
)
def close(self):
"""
Close this SSHClient and its underlying `.Transport`.
This should be called anytime you are done using the client object.
.. warning::
Paramiko registers garbage collection hooks that will try to
automatically close connections for you, but this is not presently
reliable. Failure to explicitly close your client after use may
lead to end-of-process hangs!
"""
if self._transport is None:
return
self._transport.close()
self._transport = None
if self._agent is not None:
self._agent.close()
self._agent = None
def exec_command(
self,
command,
bufsize=-1,
timeout=None,
get_pty=False,
environment=None,
):
"""
Execute a command on the SSH server. A new `.Channel` is opened and
the requested command is executed. The command's input and output
streams are returned as Python ``file``-like objects representing
stdin, stdout, and stderr.
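A typical invocation looks like this (the command is illustrative)::

    stdin, stdout, stderr = client.exec_command("uname -a")
    print(stdout.read().decode())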
:param str command: the command to execute
:param int bufsize:
interpreted the same way as by the built-in ``file()`` function in
Python
:param int timeout:
set command's channel timeout. See `.Channel.settimeout`
:param bool get_pty:
Request a pseudo-terminal from the server (default ``False``).
See `.Channel.get_pty`
:param dict environment:
a dict of shell environment variables, to be merged into the
default environment that the remote command executes within.
.. warning::
Servers may silently reject some environment variables; see the
warning in `.Channel.set_environment_variable` for details.
:return:
the stdin, stdout, and stderr of the executing command, as a
3-tuple
:raises: `.SSHException` -- if the server fails to execute the command
.. versionchanged:: 1.10
Added the ``get_pty`` kwarg.
"""
chan = self._transport.open_session(timeout=timeout)
if get_pty:
chan.get_pty()
chan.settimeout(timeout)
if environment:
chan.update_environment(environment)
chan.exec_command(command)
stdin = chan.makefile_stdin("wb", bufsize)
stdout = chan.makefile("r", bufsize)
stderr = chan.makefile_stderr("r", bufsize)
return stdin, stdout, stderr
def invoke_shell(
self,
term="vt100",
width=80,
height=24,
width_pixels=0,
height_pixels=0,
environment=None,
):
"""
Start an interactive shell session on the SSH server. A new `.Channel`
is opened and connected to a pseudo-terminal using the requested
terminal type and size.
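A brief sketch of interactive use (the command sent is illustrative)::

    chan = client.invoke_shell()
    chan.send("echo hello\\n")
    output = chan.recv(4096)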
:param str term:
the terminal type to emulate (for example, ``"vt100"``)
:param int width: the width (in characters) of the terminal window
:param int height: the height (in characters) of the terminal window
:param int width_pixels: the width (in pixels) of the terminal window
:param int height_pixels: the height (in pixels) of the terminal window
:param dict environment: the command's environment
:return: a new `.Channel` connected to the remote shell
:raises: `.SSHException` -- if the server fails to invoke a shell
"""
chan = self._transport.open_session()
chan.get_pty(term, width, height, width_pixels, height_pixels)
chan.invoke_shell()
return chan
def open_sftp(self):
"""
Open an SFTP session on the SSH server.
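For example (local and remote paths are illustrative)::

    sftp = client.open_sftp()
    sftp.put("local.log", "/tmp/remote.log")
    sftp.close()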
:return: a new `.SFTPClient` session object
"""
return self._transport.open_sftp_client()
def get_transport(self):
"""
Return the underlying `.Transport` object for this SSH connection.
This can be used to perform lower-level tasks, like opening specific
kinds of channels.
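For example, opening a forwarded TCP channel directly on the transport
(the addresses are illustrative)::

    transport = client.get_transport()
    chan = transport.open_channel(
        "direct-tcpip", ("10.0.0.5", 80), ("127.0.0.1", 0)
    )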
:return: the `.Transport` for this connection
"""
return self._transport
def _key_from_filepath(self, filename, klass, password):
"""
Attempt to derive a `.PKey` from given string path ``filename``:
- If ``filename`` appears to be a cert, the matching private key is
loaded.
- Otherwise, the filename is assumed to be a private key, and the
matching public cert will be loaded if it exists.
"""
cert_suffix = "-cert.pub"
# Assume privkey, not cert, by default
if filename.endswith(cert_suffix):
key_path = filename[: -len(cert_suffix)]
cert_path = filename
else:
key_path = filename
cert_path = filename + cert_suffix
# Blindly try the key path; if no private key, nothing will work.
key = klass.from_private_key_file(key_path, password)
# TODO: change this to 'Loading' instead of 'Trying' sometime; probably
# when #387 is released, since this is a critical log message users are
# likely testing/filtering for (bah.)
msg = "Trying discovered key {} in {}".format(
hexlify(key.get_fingerprint()), key_path
)
self._log(DEBUG, msg)
# Attempt to load cert if it exists.
if os.path.isfile(cert_path):
key.load_certificate(cert_path)
self._log(DEBUG, "Adding public certificate {}".format(cert_path))
return key
def _auth(
self,
username,
password,
pkey,
key_filenames,
allow_agent,
look_for_keys,
gss_auth,
gss_kex,
gss_deleg_creds,
gss_host,
passphrase,
):
"""
Try, in order:
- The key(s) passed in, if one was passed in.
- Any key we can find through an SSH agent (if allowed).
- Any "id_rsa", "id_dsa", "id_ecdsa" or "id_ed25519" key discoverable in
  ~/.ssh/ (if allowed).
(if allowed).
- Plain username/password auth, if a password was given.
(The password might be needed to unlock a private key [if 'passphrase'
isn't also given], or for two-factor authentication [for which it is
required].)
"""
saved_exception = None
two_factor = False
allowed_types = set()
two_factor_types = {"keyboard-interactive", "password"}
if passphrase is None and password is not None:
passphrase = password
# If GSS-API support is enabled and GSS-API Key Exchange was performed,
# we attempt authentication with gssapi-keyex.
if gss_kex and self._transport.gss_kex_used:
try:
self._transport.auth_gssapi_keyex(username)
return
except Exception as e:
saved_exception = e
# Try GSS-API authentication (gssapi-with-mic) only if GSS-API Key
# Exchange is not performed, because if we use GSS-API for the key
# exchange, there is already a fully established GSS-API context, so
# why should we do that again?
if gss_auth:
try:
return self._transport.auth_gssapi_with_mic(
username, gss_host, gss_deleg_creds
)
except Exception as e:
saved_exception = e
if pkey is not None:
try:
self._log(
DEBUG,
"Trying SSH key {}".format(
hexlify(pkey.get_fingerprint())
),
)
allowed_types = set(
self._transport.auth_publickey(username, pkey)
)
two_factor = allowed_types & two_factor_types
if not two_factor:
return
except SSHException as e:
saved_exception = e
if not two_factor:
for key_filename in key_filenames:
# TODO 4.0: leverage PKey.from_path() if we don't end up just
# killing SSHClient entirely
for pkey_class in (RSAKey, DSSKey, ECDSAKey, Ed25519Key):
try:
key = self._key_from_filepath(
key_filename, pkey_class, passphrase
)
allowed_types = set(
self._transport.auth_publickey(username, key)
)
two_factor = allowed_types & two_factor_types
if not two_factor:
return
break
except SSHException as e:
saved_exception = e
if not two_factor and allow_agent:
if self._agent is None:
self._agent = Agent()
for key in self._agent.get_keys():
try:
id_ = hexlify(key.get_fingerprint())
self._log(DEBUG, "Trying SSH agent key {}".format(id_))
# for 2-factor auth, a successfully auth'd key will return an allowed
# second-factor auth method
allowed_types = set(
self._transport.auth_publickey(username, key)
)
two_factor = allowed_types & two_factor_types
if not two_factor:
return
break
except SSHException as e:
saved_exception = e
if not two_factor:
keyfiles = []
for keytype, name in [
(RSAKey, "rsa"),
(DSSKey, "dsa"),
(ECDSAKey, "ecdsa"),
(Ed25519Key, "ed25519"),
]:
# ~/ssh/ is for windows
for directory in [".ssh", "ssh"]:
full_path = os.path.expanduser(
"~/{}/id_{}".format(directory, name)
)
if os.path.isfile(full_path):
# TODO: only do this append if below did not run
keyfiles.append((keytype, full_path))
if os.path.isfile(full_path + "-cert.pub"):
keyfiles.append((keytype, full_path + "-cert.pub"))
if not look_for_keys:
keyfiles = []
for pkey_class, filename in keyfiles:
try:
key = self._key_from_filepath(
filename, pkey_class, passphrase
)
# for 2-factor auth a successfully auth'd key will result
# in ['password']
allowed_types = set(
self._transport.auth_publickey(username, key)
)
two_factor = allowed_types & two_factor_types
if not two_factor:
return
break
except (SSHException, IOError) as e:
saved_exception = e
if password is not None:
try:
self._transport.auth_password(username, password)
return
except SSHException as e:
saved_exception = e
elif two_factor:
try:
self._transport.auth_interactive_dumb(username)
return
except SSHException as e:
saved_exception = e
# if we got an auth-failed exception earlier, re-raise it
if saved_exception is not None:
raise saved_exception
raise SSHException("No authentication methods available")
def _log(self, level, msg):
self._transport._log(level, msg)
class MissingHostKeyPolicy:
"""
Interface for defining the policy that `.SSHClient` should use when the
SSH server's hostname is not in either the system host keys or the
application's keys. Pre-made classes implement policies for automatically
adding the key to the application's `.HostKeys` object (`.AutoAddPolicy`),
and for automatically rejecting the key (`.RejectPolicy`).
A custom subclass may be used to ask the user to verify the key interactively.
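A minimal sketch of such a subclass (the class name and prompt text are
hypothetical)::

    class PromptPolicy(MissingHostKeyPolicy):
        def missing_host_key(self, client, hostname, key):
            answer = input("Accept host key for {}? [y/N] ".format(hostname))
            if answer.lower() != "y":
                raise SSHException("Host key for {} rejected".format(hostname))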
"""
def missing_host_key(self, client, hostname, key):
"""
Called when an `.SSHClient` receives a server key for a server that
isn't in either the system or local `.HostKeys` object. To accept
the key, simply return. To reject, raise an exception (which will
be passed to the calling application).
"""
pass
class AutoAddPolicy(MissingHostKeyPolicy):
"""
Policy for automatically adding the hostname and new host key to the
local `.HostKeys` object, and saving it. This is used by `.SSHClient`.
"""
def missing_host_key(self, client, hostname, key):
client._host_keys.add(hostname, key.get_name(), key)
if client._host_keys_filename is not None:
client.save_host_keys(client._host_keys_filename)
client._log(
DEBUG,
"Adding {} host key for {}: {}".format(
key.get_name(), hostname, hexlify(key.get_fingerprint())
),
)
class RejectPolicy(MissingHostKeyPolicy):
"""
Policy for automatically rejecting the unknown hostname & key. This is
used by `.SSHClient`.
"""
def missing_host_key(self, client, hostname, key):
client._log(
DEBUG,
"Rejecting {} host key for {}: {}".format(
key.get_name(), hostname, hexlify(key.get_fingerprint())
),
)
raise SSHException(
"Server {!r} not found in known_hosts".format(hostname)
)
class WarningPolicy(MissingHostKeyPolicy):
"""
Policy for logging a Python-style warning for an unknown host key, but
accepting it. This is used by `.SSHClient`.
"""
def missing_host_key(self, client, hostname, key):
warnings.warn(
"Unknown {} host key for {}: {}".format(
key.get_name(), hostname, hexlify(key.get_fingerprint())
)
)

@ -0,0 +1,245 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Common constants and global variables.
"""
import logging
import struct
#
# Formerly of py3compat.py. May be fully delete'able with a deeper look?
#
def byte_chr(c):
assert isinstance(c, int)
return struct.pack("B", c)
def byte_mask(c, mask):
assert isinstance(c, int)
return struct.pack("B", c & mask)
def byte_ord(c):
# In case we're handed a string instead of an int.
if not isinstance(c, int):
c = ord(c)
return c
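# Illustrative values for the helpers above (comments only, not executed):
#   byte_chr(65)       -> b"A"
#   byte_mask(0xFF, 7) -> b"\x07"
#   byte_ord(b"A")     -> 65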
(
MSG_DISCONNECT,
MSG_IGNORE,
MSG_UNIMPLEMENTED,
MSG_DEBUG,
MSG_SERVICE_REQUEST,
MSG_SERVICE_ACCEPT,
MSG_EXT_INFO,
) = range(1, 8)
(MSG_KEXINIT, MSG_NEWKEYS) = range(20, 22)
(
MSG_USERAUTH_REQUEST,
MSG_USERAUTH_FAILURE,
MSG_USERAUTH_SUCCESS,
MSG_USERAUTH_BANNER,
) = range(50, 54)
MSG_USERAUTH_PK_OK = 60
(MSG_USERAUTH_INFO_REQUEST, MSG_USERAUTH_INFO_RESPONSE) = range(60, 62)
(MSG_USERAUTH_GSSAPI_RESPONSE, MSG_USERAUTH_GSSAPI_TOKEN) = range(60, 62)
(
MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE,
MSG_USERAUTH_GSSAPI_ERROR,
MSG_USERAUTH_GSSAPI_ERRTOK,
MSG_USERAUTH_GSSAPI_MIC,
) = range(63, 67)
HIGHEST_USERAUTH_MESSAGE_ID = 79
(MSG_GLOBAL_REQUEST, MSG_REQUEST_SUCCESS, MSG_REQUEST_FAILURE) = range(80, 83)
(
MSG_CHANNEL_OPEN,
MSG_CHANNEL_OPEN_SUCCESS,
MSG_CHANNEL_OPEN_FAILURE,
MSG_CHANNEL_WINDOW_ADJUST,
MSG_CHANNEL_DATA,
MSG_CHANNEL_EXTENDED_DATA,
MSG_CHANNEL_EOF,
MSG_CHANNEL_CLOSE,
MSG_CHANNEL_REQUEST,
MSG_CHANNEL_SUCCESS,
MSG_CHANNEL_FAILURE,
) = range(90, 101)
cMSG_DISCONNECT = byte_chr(MSG_DISCONNECT)
cMSG_IGNORE = byte_chr(MSG_IGNORE)
cMSG_UNIMPLEMENTED = byte_chr(MSG_UNIMPLEMENTED)
cMSG_DEBUG = byte_chr(MSG_DEBUG)
cMSG_SERVICE_REQUEST = byte_chr(MSG_SERVICE_REQUEST)
cMSG_SERVICE_ACCEPT = byte_chr(MSG_SERVICE_ACCEPT)
cMSG_EXT_INFO = byte_chr(MSG_EXT_INFO)
cMSG_KEXINIT = byte_chr(MSG_KEXINIT)
cMSG_NEWKEYS = byte_chr(MSG_NEWKEYS)
cMSG_USERAUTH_REQUEST = byte_chr(MSG_USERAUTH_REQUEST)
cMSG_USERAUTH_FAILURE = byte_chr(MSG_USERAUTH_FAILURE)
cMSG_USERAUTH_SUCCESS = byte_chr(MSG_USERAUTH_SUCCESS)
cMSG_USERAUTH_BANNER = byte_chr(MSG_USERAUTH_BANNER)
cMSG_USERAUTH_PK_OK = byte_chr(MSG_USERAUTH_PK_OK)
cMSG_USERAUTH_INFO_REQUEST = byte_chr(MSG_USERAUTH_INFO_REQUEST)
cMSG_USERAUTH_INFO_RESPONSE = byte_chr(MSG_USERAUTH_INFO_RESPONSE)
cMSG_USERAUTH_GSSAPI_RESPONSE = byte_chr(MSG_USERAUTH_GSSAPI_RESPONSE)
cMSG_USERAUTH_GSSAPI_TOKEN = byte_chr(MSG_USERAUTH_GSSAPI_TOKEN)
cMSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE = byte_chr(
MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE
)
cMSG_USERAUTH_GSSAPI_ERROR = byte_chr(MSG_USERAUTH_GSSAPI_ERROR)
cMSG_USERAUTH_GSSAPI_ERRTOK = byte_chr(MSG_USERAUTH_GSSAPI_ERRTOK)
cMSG_USERAUTH_GSSAPI_MIC = byte_chr(MSG_USERAUTH_GSSAPI_MIC)
cMSG_GLOBAL_REQUEST = byte_chr(MSG_GLOBAL_REQUEST)
cMSG_REQUEST_SUCCESS = byte_chr(MSG_REQUEST_SUCCESS)
cMSG_REQUEST_FAILURE = byte_chr(MSG_REQUEST_FAILURE)
cMSG_CHANNEL_OPEN = byte_chr(MSG_CHANNEL_OPEN)
cMSG_CHANNEL_OPEN_SUCCESS = byte_chr(MSG_CHANNEL_OPEN_SUCCESS)
cMSG_CHANNEL_OPEN_FAILURE = byte_chr(MSG_CHANNEL_OPEN_FAILURE)
cMSG_CHANNEL_WINDOW_ADJUST = byte_chr(MSG_CHANNEL_WINDOW_ADJUST)
cMSG_CHANNEL_DATA = byte_chr(MSG_CHANNEL_DATA)
cMSG_CHANNEL_EXTENDED_DATA = byte_chr(MSG_CHANNEL_EXTENDED_DATA)
cMSG_CHANNEL_EOF = byte_chr(MSG_CHANNEL_EOF)
cMSG_CHANNEL_CLOSE = byte_chr(MSG_CHANNEL_CLOSE)
cMSG_CHANNEL_REQUEST = byte_chr(MSG_CHANNEL_REQUEST)
cMSG_CHANNEL_SUCCESS = byte_chr(MSG_CHANNEL_SUCCESS)
cMSG_CHANNEL_FAILURE = byte_chr(MSG_CHANNEL_FAILURE)
# for debugging:
MSG_NAMES = {
MSG_DISCONNECT: "disconnect",
MSG_IGNORE: "ignore",
MSG_UNIMPLEMENTED: "unimplemented",
MSG_DEBUG: "debug",
MSG_SERVICE_REQUEST: "service-request",
MSG_SERVICE_ACCEPT: "service-accept",
MSG_KEXINIT: "kexinit",
MSG_EXT_INFO: "ext-info",
MSG_NEWKEYS: "newkeys",
30: "kex30",
31: "kex31",
32: "kex32",
33: "kex33",
34: "kex34",
40: "kex40",
41: "kex41",
MSG_USERAUTH_REQUEST: "userauth-request",
MSG_USERAUTH_FAILURE: "userauth-failure",
MSG_USERAUTH_SUCCESS: "userauth-success",
MSG_USERAUTH_BANNER: "userauth-banner",
MSG_USERAUTH_PK_OK: "userauth-60(pk-ok/info-request)",
MSG_USERAUTH_INFO_RESPONSE: "userauth-info-response",
MSG_GLOBAL_REQUEST: "global-request",
MSG_REQUEST_SUCCESS: "request-success",
MSG_REQUEST_FAILURE: "request-failure",
MSG_CHANNEL_OPEN: "channel-open",
MSG_CHANNEL_OPEN_SUCCESS: "channel-open-success",
MSG_CHANNEL_OPEN_FAILURE: "channel-open-failure",
MSG_CHANNEL_WINDOW_ADJUST: "channel-window-adjust",
MSG_CHANNEL_DATA: "channel-data",
MSG_CHANNEL_EXTENDED_DATA: "channel-extended-data",
MSG_CHANNEL_EOF: "channel-eof",
MSG_CHANNEL_CLOSE: "channel-close",
MSG_CHANNEL_REQUEST: "channel-request",
MSG_CHANNEL_SUCCESS: "channel-success",
MSG_CHANNEL_FAILURE: "channel-failure",
MSG_USERAUTH_GSSAPI_RESPONSE: "userauth-gssapi-response",
MSG_USERAUTH_GSSAPI_TOKEN: "userauth-gssapi-token",
MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE: "userauth-gssapi-exchange-complete",
MSG_USERAUTH_GSSAPI_ERROR: "userauth-gssapi-error",
MSG_USERAUTH_GSSAPI_ERRTOK: "userauth-gssapi-error-token",
MSG_USERAUTH_GSSAPI_MIC: "userauth-gssapi-mic",
}
# authentication request return codes:
AUTH_SUCCESSFUL, AUTH_PARTIALLY_SUCCESSFUL, AUTH_FAILED = range(3)
# channel request failed reasons:
(
OPEN_SUCCEEDED,
OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED,
OPEN_FAILED_CONNECT_FAILED,
OPEN_FAILED_UNKNOWN_CHANNEL_TYPE,
OPEN_FAILED_RESOURCE_SHORTAGE,
) = range(0, 5)
CONNECTION_FAILED_CODE = {
1: "Administratively prohibited",
2: "Connect failed",
3: "Unknown channel type",
4: "Resource shortage",
}
(
DISCONNECT_SERVICE_NOT_AVAILABLE,
DISCONNECT_AUTH_CANCELLED_BY_USER,
DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE,
) = (7, 13, 14)
zero_byte = byte_chr(0)
one_byte = byte_chr(1)
four_byte = byte_chr(4)
max_byte = byte_chr(0xFF)
cr_byte = byte_chr(13)
linefeed_byte = byte_chr(10)
crlf = cr_byte + linefeed_byte
cr_byte_value = 13
linefeed_byte_value = 10
xffffffff = 0xFFFFFFFF
x80000000 = 0x80000000
o666 = 438
o660 = 432
o644 = 420
o600 = 384
o777 = 511
o700 = 448
o70 = 56
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
ERROR = logging.ERROR
CRITICAL = logging.CRITICAL
# Common IO/select/etc sleep period, in seconds
io_sleep = 0.01
DEFAULT_WINDOW_SIZE = 64 * 2**15
DEFAULT_MAX_PACKET_SIZE = 2**15
# lower bound on the max packet size we'll accept from the remote host
# Minimum packet size is 32768 bytes according to
# http://www.ietf.org/rfc/rfc4254.txt
MIN_WINDOW_SIZE = 2**15
# However, according to http://www.ietf.org/rfc/rfc4253.txt it is perfectly
# legal to accept a much smaller size, as the OpenSSH client does (it uses 16384).
MIN_PACKET_SIZE = 2**12
# Max windows size according to http://www.ietf.org/rfc/rfc4254.txt
MAX_WINDOW_SIZE = 2**32 - 1

@ -0,0 +1,40 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Compression implementations for a Transport.
"""
import zlib
class ZlibCompressor:
def __init__(self):
# Use the default level of zlib compression
self.z = zlib.compressobj()
def __call__(self, data):
return self.z.compress(data) + self.z.flush(zlib.Z_FULL_FLUSH)
class ZlibDecompressor:
def __init__(self):
self.z = zlib.decompressobj()
def __call__(self, data):
return self.z.decompress(data)
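# Illustrative round trip using the classes above (comments only, not executed):
#   comp, decomp = ZlibCompressor(), ZlibDecompressor()
#   assert decomp(comp(b"payload")) == b"payload"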

@ -0,0 +1,696 @@
# Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
# Copyright (C) 2012 Olle Lundberg <geek@nerd.sh>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Configuration file (aka ``ssh_config``) support.
"""
import fnmatch
import getpass
import os
import re
import shlex
import socket
from hashlib import sha1
from io import StringIO
from functools import partial
invoke, invoke_import_error = None, None
try:
import invoke
except ImportError as e:
invoke_import_error = e
from .ssh_exception import CouldNotCanonicalize, ConfigParseError
SSH_PORT = 22
class SSHConfig:
"""
Representation of config information as stored in the format used by
OpenSSH. Queries can be made via `lookup`. The format is described in
OpenSSH's ``ssh_config`` man page. This class is provided primarily as a
convenience to posix users (since the OpenSSH format is a de-facto
standard on posix) but should work fine on Windows too.
.. versionadded:: 1.6
"""
SETTINGS_REGEX = re.compile(r"(\w+)(?:\s*=\s*|\s+)(.+)")
# TODO: do a full scan of ssh.c & friends to make sure we're fully
# compatible across the board, e.g. OpenSSH 8.1 added %n to ProxyCommand.
TOKENS_BY_CONFIG_KEY = {
"controlpath": ["%C", "%h", "%l", "%L", "%n", "%p", "%r", "%u"],
"hostname": ["%h"],
"identityfile": ["%C", "~", "%d", "%h", "%l", "%u", "%r"],
"proxycommand": ["~", "%h", "%p", "%r"],
"proxyjump": ["%h", "%p", "%r"],
# Doesn't seem worth making this 'special' for now, it will fit well
# enough (no actual match-exec config key to be confused with).
"match-exec": ["%C", "%d", "%h", "%L", "%l", "%n", "%p", "%r", "%u"],
}
def __init__(self):
"""
Create a new OpenSSH config object.
Note: the newer alternate constructors `from_path`, `from_file` and
`from_text` are simpler to use, as they parse on instantiation. For
example, instead of::
config = SSHConfig()
config.parse(open("some-path.config"))
you could::
config = SSHConfig.from_file(open("some-path.config"))
# Or more directly:
config = SSHConfig.from_path("some-path.config")
# Or if you have arbitrary ssh_config text from some other source:
config = SSHConfig.from_text("Host foo\\n\\tUser bar")
"""
self._config = []
@classmethod
def from_text(cls, text):
"""
Create a new, parsed `SSHConfig` from ``text`` string.
.. versionadded:: 2.7
"""
return cls.from_file(StringIO(text))
@classmethod
def from_path(cls, path):
"""
Create a new, parsed `SSHConfig` from the file found at ``path``.
.. versionadded:: 2.7
"""
with open(path) as flo:
return cls.from_file(flo)
@classmethod
def from_file(cls, flo):
"""
Create a new, parsed `SSHConfig` from file-like object ``flo``.
.. versionadded:: 2.7
"""
obj = cls()
obj.parse(flo)
return obj
def parse(self, file_obj):
"""
Read an OpenSSH config from the given file object.
:param file_obj: a file-like object to read the config file from
"""
# Start out w/ implicit/anonymous global host-like block to hold
# anything not contained by an explicit one.
context = {"host": ["*"], "config": {}}
for line in file_obj:
# Strip any leading or trailing whitespace from the line.
# Refer to https://github.com/paramiko/paramiko/issues/499
line = line.strip()
# Skip blanks, comments
if not line or line.startswith("#"):
continue
# Parse line into key, value
match = re.match(self.SETTINGS_REGEX, line)
if not match:
raise ConfigParseError("Unparsable line {}".format(line))
key = match.group(1).lower()
value = match.group(2)
# Host keyword triggers switch to new block/context
if key in ("host", "match"):
self._config.append(context)
context = {"config": {}}
if key == "host":
# TODO 4.0: make these real objects or at least name this
# "hosts" to acknowledge it's an iterable. (Doing so prior
# to 3.0, despite it being a private API, feels bad -
# surely such an old codebase has folks actually relying on
# these keys.)
context["host"] = self._get_hosts(value)
else:
context["matches"] = self._get_matches(value)
# Special-case for noop ProxyCommands
elif key == "proxycommand" and value.lower() == "none":
# Store 'none' as None - not as a string implying that the
# proxycommand is the literal shell command "none"!
context["config"][key] = None
# All other keywords get stored, directly or via append
else:
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
# identityfile, localforward, remoteforward keys are special
# cases, since they are allowed to be specified multiple times
# and they should be tried in order of specification.
if key in ["identityfile", "localforward", "remoteforward"]:
if key in context["config"]:
context["config"][key].append(value)
else:
context["config"][key] = [value]
elif key not in context["config"]:
context["config"][key] = value
# Store last 'open' block and we're done
self._config.append(context)
def lookup(self, hostname):
"""
Return a dict (`SSHConfigDict`) of config options for a given hostname.
The host-matching rules of OpenSSH's ``ssh_config`` man page are used:
For each parameter, the first obtained value will be used. The
configuration files contain sections separated by ``Host`` and/or
``Match`` specifications, and that section is only applied for hosts
which match the given patterns or keywords.
Since the first obtained value for each parameter is used, more host-
specific declarations should be given near the beginning of the file,
and general defaults at the end.
The keys in the returned dict are all normalized to lowercase (look for
``"port"``, not ``"Port"``). The values are processed according to the
rules for substitution variable expansion in ``ssh_config``.
Finally, please see the docs for `SSHConfigDict` for deeper info on
features such as optional type conversion methods, e.g.::
conf = my_config.lookup('myhost')
assert conf['passwordauthentication'] == 'yes'
assert conf.as_bool('passwordauthentication') is True
.. note::
If there is no explicitly configured ``HostName`` value, it will be
set to the being-looked-up hostname, which is as close as we can
get to OpenSSH's behavior around that particular option.
:param str hostname: the hostname to lookup
.. versionchanged:: 2.5
Returns `SSHConfigDict` objects instead of dict literals.
.. versionchanged:: 2.7
Added canonicalization support.
.. versionchanged:: 2.7
Added ``Match`` support.
.. versionchanged:: 3.3
Added ``Match final`` support.
"""
# First pass
options = self._lookup(hostname=hostname)
# Inject HostName if it was not set (this used to be done incidentally
# during tokenization, for some reason).
if "hostname" not in options:
options["hostname"] = hostname
# Handle canonicalization
canon = options.get("canonicalizehostname", None) in ("yes", "always")
maxdots = int(options.get("canonicalizemaxdots", 1))
if canon and hostname.count(".") <= maxdots:
# NOTE: OpenSSH manpage does not explicitly state this, but its
# implementation for CanonicalDomains is 'split on any whitespace'.
domains = options["canonicaldomains"].split()
hostname = self.canonicalize(hostname, options, domains)
# Overwrite HostName again here (this is also what OpenSSH does)
options["hostname"] = hostname
options = self._lookup(
hostname, options, canonical=True, final=True
)
else:
options = self._lookup(
hostname, options, canonical=False, final=True
)
return options
def _lookup(self, hostname, options=None, canonical=False, final=False):
# Init
if options is None:
options = SSHConfigDict()
# Iterate all stanzas, applying any that match, in turn (so that things
# like Match can reference currently understood state)
for context in self._config:
if not (
self._pattern_matches(context.get("host", []), hostname)
or self._does_match(
context.get("matches", []),
hostname,
canonical,
final,
options,
)
):
continue
for key, value in context["config"].items():
if key not in options:
# Create a copy of the original value,
# else it will reference the original list
# in self._config and update that value too
# when the extend() is being called.
options[key] = value[:] if value is not None else value
elif key == "identityfile":
options[key].extend(
x for x in value if x not in options[key]
)
if final:
# Expand variables in resulting values
# (besides 'Match exec' which was already handled above)
options = self._expand_variables(options, hostname)
return options
def canonicalize(self, hostname, options, domains):
"""
Return canonicalized version of ``hostname``.
:param str hostname: Target hostname.
:param options: An `SSHConfigDict` from a previous lookup pass.
:param domains: List of domains (e.g. ``["paramiko.org"]``).
:returns: A canonicalized hostname if one was found, else ``None``.
.. versionadded:: 2.7
"""
found = False
for domain in domains:
candidate = "{}.{}".format(hostname, domain)
family_specific = _addressfamily_host_lookup(candidate, options)
if family_specific is not None:
# TODO: would we want to dig deeper into other results? e.g. to
# find something that satisfies PermittedCNAMEs when that is
# implemented?
found = family_specific[0]
else:
# TODO: what does ssh use here and is there a reason to use
# that instead of gethostbyname?
try:
found = socket.gethostbyname(candidate)
except socket.gaierror:
pass
if found:
# TODO: follow CNAME (implied by found != candidate?) if
# CanonicalizePermittedCNAMEs allows it
return candidate
# If we got here, it means canonicalization failed.
# When CanonicalizeFallbackLocal is undefined or 'yes', we just spit
# back the original hostname.
if options.get("canonicalizefallbacklocal", "yes") == "yes":
return hostname
# And here, we failed AND fallback was set to a non-yes value, so we
# need to get mad.
raise CouldNotCanonicalize(hostname)
def get_hostnames(self):
"""
Return the set of literal hostnames defined in the SSH config (both
explicit hostnames and wildcard entries).
"""
hosts = set()
for entry in self._config:
hosts.update(entry["host"])
return hosts
def _pattern_matches(self, patterns, target):
# Convenience auto-splitter if not already a list
if hasattr(patterns, "split"):
patterns = patterns.split(",")
match = False
for pattern in patterns:
# Short-circuit if target matches a negated pattern
if pattern.startswith("!") and fnmatch.fnmatch(
target, pattern[1:]
):
return False
# Flag a match, but continue (in case of later negation) if regular
# match occurs
elif fnmatch.fnmatch(target, pattern):
match = True
return match
def _does_match(
self, match_list, target_hostname, canonical, final, options
):
matched = []
candidates = match_list[:]
local_username = getpass.getuser()
while candidates:
candidate = candidates.pop(0)
passed = None
# Obtain latest host/user value every loop, so later Match may
# reference values assigned within a prior Match.
configured_host = options.get("hostname", None)
configured_user = options.get("user", None)
type_, param = candidate["type"], candidate["param"]
# Canonical is a hard pass/fail based on whether this is a
# canonicalized re-lookup.
if type_ == "canonical":
if self._should_fail(canonical, candidate):
return False
if type_ == "final":
passed = final
# The parse step ensures we only see this by itself or after
# canonical, so it's also an easy hard pass. (No negation here as
# that would be uh, pretty weird?)
elif type_ == "all":
return True
# From here, we are testing various non-hard criteria,
# short-circuiting only on fail
elif type_ == "host":
hostval = configured_host or target_hostname
passed = self._pattern_matches(param, hostval)
elif type_ == "originalhost":
passed = self._pattern_matches(param, target_hostname)
elif type_ == "user":
user = configured_user or local_username
passed = self._pattern_matches(param, user)
elif type_ == "localuser":
passed = self._pattern_matches(param, local_username)
elif type_ == "exec":
exec_cmd = self._tokenize(
options, target_hostname, "match-exec", param
)
# This is the laziest spot in which we can get mad about an
# inability to import Invoke.
if invoke is None:
raise invoke_import_error
# Like OpenSSH, we 'redirect' stdout but let stderr bubble up
passed = invoke.run(exec_cmd, hide="stdout", warn=True).ok
# Tackle any 'passed, but was negated' results from above
if passed is not None and self._should_fail(passed, candidate):
return False
# Made it all the way here? Everything matched!
matched.append(candidate)
# Did anything match? (To be treated as bool, usually.)
return matched
def _should_fail(self, would_pass, candidate):
return would_pass if candidate["negate"] else not would_pass
def _tokenize(self, config, target_hostname, key, value):
"""
Tokenize a string based on current config/hostname data.
:param config: Current config data.
:param target_hostname: Original target connection hostname.
:param key: Config key being tokenized (used to filter token list).
:param value: Config value being tokenized.
:returns: The tokenized version of the input ``value`` string.
"""
allowed_tokens = self._allowed_tokens(key)
# Short-circuit if no tokenization possible
if not allowed_tokens:
return value
# Obtain potentially configured hostname, for use with %h.
# Special-case where we are tokenizing the hostname itself, to avoid
# replacing %h with a %h-bearing value, etc.
configured_hostname = target_hostname
if key != "hostname":
configured_hostname = config.get("hostname", configured_hostname)
# Ditto the rest of the source values
if "port" in config:
port = config["port"]
else:
port = SSH_PORT
user = getpass.getuser()
if "user" in config:
remoteuser = config["user"]
else:
remoteuser = user
local_hostname = socket.gethostname().split(".")[0]
local_fqdn = LazyFqdn(config, local_hostname)
homedir = os.path.expanduser("~")
tohash = local_hostname + target_hostname + repr(port) + remoteuser
# The actual tokens!
replacements = {
# TODO: %%???
"%C": sha1(tohash.encode()).hexdigest(),
"%d": homedir,
"%h": configured_hostname,
# TODO: %i?
"%L": local_hostname,
"%l": local_fqdn,
# also this is pseudo buggy when not in Match exec mode so document
# that. also WHY is that the case?? don't we do all of this late?
"%n": target_hostname,
"%p": port,
"%r": remoteuser,
# TODO: %T? don't believe this is possible however
"%u": user,
"~": homedir,
}
# Do the thing with the stuff
tokenized = value
for find, replace in replacements.items():
if find not in allowed_tokens:
continue
tokenized = tokenized.replace(find, str(replace))
# TODO: log? eg that value -> tokenized
return tokenized
def _allowed_tokens(self, key):
"""
Given config ``key``, return list of token strings to tokenize.
.. note::
This feels like it wants to eventually go away, but is used to
preserve as-strict-as-possible compatibility with OpenSSH, which
for whatever reason only applies some tokens to some config keys.
"""
return self.TOKENS_BY_CONFIG_KEY.get(key, [])
def _expand_variables(self, config, target_hostname):
"""
Return a dict of config options with expanded substitutions
for a given original & current target hostname.
Please refer to :doc:`/api/config` for details.
:param dict config: the currently parsed config
:param str hostname: the hostname whose config is being looked up
"""
for k in config:
if config[k] is None:
continue
tokenizer = partial(self._tokenize, config, target_hostname, k)
if isinstance(config[k], list):
for i, value in enumerate(config[k]):
config[k][i] = tokenizer(value)
else:
config[k] = tokenizer(config[k])
return config
def _get_hosts(self, host):
"""
Return a list of host_names from host value.
"""
try:
return shlex.split(host)
except ValueError:
raise ConfigParseError("Unparsable host {}".format(host))
def _get_matches(self, match):
"""
Parse a specific Match config line into a list-of-dicts for its values.
Performs some parse-time validation as well.
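For example, the (illustrative) line ``user deploy !host *.internal``
parses to::

    [
        {"type": "user", "param": "deploy", "negate": False},
        {"type": "host", "param": "*.internal", "negate": True},
    ]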
"""
matches = []
tokens = shlex.split(match)
while tokens:
match = {"type": None, "param": None, "negate": False}
type_ = tokens.pop(0)
# Handle per-keyword negation
if type_.startswith("!"):
match["negate"] = True
type_ = type_[1:]
match["type"] = type_
# all/canonical have no params (everything else does)
if type_ in ("all", "canonical", "final"):
matches.append(match)
continue
if not tokens:
raise ConfigParseError(
"Missing parameter to Match '{}' keyword".format(type_)
)
match["param"] = tokens.pop(0)
matches.append(match)
# Perform some (easier to do now than in the middle) validation that is
# better handled here than at lookup time.
keywords = [x["type"] for x in matches]
if "all" in keywords:
allowable = ("all", "canonical")
ok, bad = (
list(filter(lambda x: x in allowable, keywords)),
list(filter(lambda x: x not in allowable, keywords)),
)
err = None
if any(bad):
err = "Match does not allow 'all' mixed with anything but 'canonical'" # noqa
elif "canonical" in ok and ok.index("canonical") > ok.index("all"):
err = "Match does not allow 'all' before 'canonical'"
if err is not None:
raise ConfigParseError(err)
return matches
def _addressfamily_host_lookup(hostname, options):
"""
Try looking up ``hostname`` in an IPv4 or IPv6 specific manner.
This is an odd duck because it is needed by two divergent use cases. It looks
up ``AddressFamily`` in ``options`` and if it is ``inet`` or ``inet6``,
this function uses `socket.getaddrinfo` to perform a family-specific
lookup, returning the result if successful.
In any other situation -- lookup failure, or ``AddressFamily`` being
unspecified or ``any`` -- ``None`` is returned instead and the caller is
expected to do something situation-appropriate like calling
`socket.gethostbyname`.
:param str hostname: Hostname to look up.
:param options: `SSHConfigDict` instance w/ parsed options.
:returns: ``getaddrinfo``-style tuples, or ``None``, depending.
"""
address_family = options.get("addressfamily", "any").lower()
if address_family == "any":
return
try:
family = socket.AF_INET6
if address_family == "inet":
family = socket.AF_INET
return socket.getaddrinfo(
hostname,
None,
family,
socket.SOCK_DGRAM,
socket.IPPROTO_IP,
socket.AI_CANONNAME,
)
except socket.gaierror:
pass
class LazyFqdn:
"""
Returns the host's fqdn on request as string.
"""
def __init__(self, config, host=None):
self.fqdn = None
self.config = config
self.host = host
def __str__(self):
if self.fqdn is None:
#
# If the SSH config contains AddressFamily, use that when
# determining the local host's FQDN. Using socket.getfqdn() from
# the standard library is the most general solution, but can
# result in noticeable delays on some platforms when IPv6 is
# misconfigured or not available, as it calls getaddrinfo with no
# address family specified, so both IPv4 and IPv6 are checked.
#
# Handle specific option
fqdn = None
results = _addressfamily_host_lookup(self.host, self.config)
if results is not None:
for res in results:
af, socktype, proto, canonname, sa = res
if canonname and "." in canonname:
fqdn = canonname
break
# Handle 'any' / unspecified / lookup failure
if fqdn is None:
fqdn = socket.getfqdn()
# Cache
self.fqdn = fqdn
return self.fqdn
class SSHConfigDict(dict):
"""
A dictionary wrapper/subclass for per-host configuration structures.
This class introduces some usage niceties for consumers of `SSHConfig`,
specifically around the issue of variable type conversions: normal value
access yields strings, but there are now methods such as `as_bool` and
`as_int` that yield casted values instead.
For example, given the following ``ssh_config`` file snippet::
Host foo.example.com
PasswordAuthentication no
Compression yes
ServerAliveInterval 60
the following code highlights how you can access the raw strings as well as
usefully type-cast Python versions (recalling that keys are all
normalized to lowercase first)::
my_config = SSHConfig()
my_config.parse(open('~/.ssh/config'))
conf = my_config.lookup('foo.example.com')
assert conf['passwordauthentication'] == 'no'
assert conf.as_bool('passwordauthentication') is False
assert conf['compression'] == 'yes'
assert conf.as_bool('compression') is True
assert conf['serveraliveinterval'] == '60'
assert conf.as_int('serveraliveinterval') == 60
.. versionadded:: 2.5
"""
def as_bool(self, key):
"""
Express given key's value as a boolean type.
Typically, this is used for ``ssh_config``'s pseudo-boolean values
which are either ``"yes"`` or ``"no"``. In such cases, ``"yes"`` yields
``True`` and any other value becomes ``False``.
.. note::
If (for whatever reason) the stored value is already boolean in
nature, it's simply returned.
.. versionadded:: 2.5
"""
val = self[key]
if isinstance(val, bool):
return val
return val.lower() == "yes"
def as_int(self, key):
"""
Express given key's value as an integer, if possible.
This method will raise ``ValueError`` or similar if the value is not
int-appropriate, same as the builtin `int` type.
.. versionadded:: 2.5
"""
return int(self[key])

@ -0,0 +1,258 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
DSS keys.
"""
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import dsa
from cryptography.hazmat.primitives.asymmetric.utils import (
decode_dss_signature,
encode_dss_signature,
)
from paramiko import util
from paramiko.common import zero_byte
from paramiko.ssh_exception import SSHException
from paramiko.message import Message
from paramiko.ber import BER, BERException
from paramiko.pkey import PKey
class DSSKey(PKey):
"""
Representation of a DSS key which can be used to sign and verify SSH2
data.
"""
name = "ssh-dss"
def __init__(
self,
msg=None,
data=None,
filename=None,
password=None,
vals=None,
file_obj=None,
):
self.p = None
self.q = None
self.g = None
self.y = None
self.x = None
self.public_blob = None
if file_obj is not None:
self._from_private_key(file_obj, password)
return
if filename is not None:
self._from_private_key_file(filename, password)
return
if (msg is None) and (data is not None):
msg = Message(data)
if vals is not None:
self.p, self.q, self.g, self.y = vals
else:
self._check_type_and_load_cert(
msg=msg,
key_type=self.name,
cert_type=f"{self.name}-cert-v01@openssh.com",
)
self.p = msg.get_mpint()
self.q = msg.get_mpint()
self.g = msg.get_mpint()
self.y = msg.get_mpint()
self.size = util.bit_length(self.p)
def asbytes(self):
m = Message()
m.add_string(self.name)
m.add_mpint(self.p)
m.add_mpint(self.q)
m.add_mpint(self.g)
m.add_mpint(self.y)
return m.asbytes()
def __str__(self):
return self.asbytes()
@property
def _fields(self):
return (self.get_name(), self.p, self.q, self.g, self.y)
# TODO 4.0: remove
def get_name(self):
return self.name
def get_bits(self):
return self.size
def can_sign(self):
return self.x is not None
def sign_ssh_data(self, data, algorithm=None):
key = dsa.DSAPrivateNumbers(
x=self.x,
public_numbers=dsa.DSAPublicNumbers(
y=self.y,
parameter_numbers=dsa.DSAParameterNumbers(
p=self.p, q=self.q, g=self.g
),
),
).private_key(backend=default_backend())
sig = key.sign(data, hashes.SHA1())
r, s = decode_dss_signature(sig)
m = Message()
m.add_string(self.name)
# apparently, in rare cases, r or s may be shorter than 20 bytes!
rstr = util.deflate_long(r, 0)
sstr = util.deflate_long(s, 0)
if len(rstr) < 20:
rstr = zero_byte * (20 - len(rstr)) + rstr
if len(sstr) < 20:
sstr = zero_byte * (20 - len(sstr)) + sstr
m.add_string(rstr + sstr)
return m
def verify_ssh_sig(self, data, msg):
if len(msg.asbytes()) == 40:
# spies.com bug: signature has no header
sig = msg.asbytes()
else:
kind = msg.get_text()
if kind != self.name:
return 0
sig = msg.get_binary()
# pull out (r, s) which are NOT encoded as mpints
sigR = util.inflate_long(sig[:20], 1)
sigS = util.inflate_long(sig[20:], 1)
signature = encode_dss_signature(sigR, sigS)
key = dsa.DSAPublicNumbers(
y=self.y,
parameter_numbers=dsa.DSAParameterNumbers(
p=self.p, q=self.q, g=self.g
),
).public_key(backend=default_backend())
try:
key.verify(signature, data, hashes.SHA1())
except InvalidSignature:
return False
else:
return True
def write_private_key_file(self, filename, password=None):
key = dsa.DSAPrivateNumbers(
x=self.x,
public_numbers=dsa.DSAPublicNumbers(
y=self.y,
parameter_numbers=dsa.DSAParameterNumbers(
p=self.p, q=self.q, g=self.g
),
),
).private_key(backend=default_backend())
self._write_private_key_file(
filename,
key,
serialization.PrivateFormat.TraditionalOpenSSL,
password=password,
)
def write_private_key(self, file_obj, password=None):
key = dsa.DSAPrivateNumbers(
x=self.x,
public_numbers=dsa.DSAPublicNumbers(
y=self.y,
parameter_numbers=dsa.DSAParameterNumbers(
p=self.p, q=self.q, g=self.g
),
),
).private_key(backend=default_backend())
self._write_private_key(
file_obj,
key,
serialization.PrivateFormat.TraditionalOpenSSL,
password=password,
)
@staticmethod
def generate(bits=1024, progress_func=None):
"""
Generate a new private DSS key. This factory function can be used to
generate a new host key or authentication key.
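A short sketch (the output path is illustrative)::

    key = DSSKey.generate(bits=1024)
    key.write_private_key_file("/tmp/test_dss.key")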
:param int bits: number of bits the generated key should be.
:param progress_func: Unused
:return: new `.DSSKey` private key
"""
numbers = dsa.generate_private_key(
bits, backend=default_backend()
).private_numbers()
key = DSSKey(
vals=(
numbers.public_numbers.parameter_numbers.p,
numbers.public_numbers.parameter_numbers.q,
numbers.public_numbers.parameter_numbers.g,
numbers.public_numbers.y,
)
)
key.x = numbers.x
return key
# ...internals...
def _from_private_key_file(self, filename, password):
data = self._read_private_key_file("DSA", filename, password)
self._decode_key(data)
def _from_private_key(self, file_obj, password):
data = self._read_private_key("DSA", file_obj, password)
self._decode_key(data)
def _decode_key(self, data):
pkformat, data = data
# private key file contains:
# DSAPrivateKey = { version = 0, p, q, g, y, x }
if pkformat == self._PRIVATE_KEY_FORMAT_ORIGINAL:
try:
keylist = BER(data).decode()
except BERException as e:
raise SSHException("Unable to parse key file: {}".format(e))
elif pkformat == self._PRIVATE_KEY_FORMAT_OPENSSH:
keylist = self._uint32_cstruct_unpack(data, "iiiii")
keylist = [0] + list(keylist)
else:
self._got_bad_key_format_id(pkformat)
if type(keylist) is not list or len(keylist) < 6 or keylist[0] != 0:
raise SSHException(
"not a valid DSA private key file (bad ber encoding)"
)
self.p = keylist[1]
self.q = keylist[2]
self.g = keylist[3]
self.y = keylist[4]
self.x = keylist[5]
self.size = util.bit_length(self.p)

@ -0,0 +1,339 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
ECDSA keys
"""
from cryptography.exceptions import InvalidSignature, UnsupportedAlgorithm
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.utils import (
decode_dss_signature,
encode_dss_signature,
)
from paramiko.common import four_byte
from paramiko.message import Message
from paramiko.pkey import PKey
from paramiko.ssh_exception import SSHException
from paramiko.util import deflate_long
class _ECDSACurve:
"""
Represents a specific ECDSA Curve (nistp256, nistp384, etc).
Handles the generation of the key format identifier and the selection of
the proper hash function. Also grabs the proper curve class from the
``cryptography`` library.
"""
def __init__(self, curve_class, nist_name):
self.nist_name = nist_name
self.key_length = curve_class.key_size
# Defined in RFC 5656 6.2
self.key_format_identifier = "ecdsa-sha2-" + self.nist_name
# Defined in RFC 5656 6.2.1
if self.key_length <= 256:
self.hash_object = hashes.SHA256
elif self.key_length <= 384:
self.hash_object = hashes.SHA384
else:
self.hash_object = hashes.SHA512
self.curve_class = curve_class
class _ECDSACurveSet:
"""
A collection to hold the ECDSA curves. Allows querying by curve class, key
length, and key format identifier, which are the ways in which ECDSAKey
needs to look up curves.
"""
def __init__(self, ecdsa_curves):
self.ecdsa_curves = ecdsa_curves
def get_key_format_identifier_list(self):
return [curve.key_format_identifier for curve in self.ecdsa_curves]
def get_by_curve_class(self, curve_class):
for curve in self.ecdsa_curves:
if curve.curve_class == curve_class:
return curve
def get_by_key_format_identifier(self, key_format_identifier):
for curve in self.ecdsa_curves:
if curve.key_format_identifier == key_format_identifier:
return curve
def get_by_key_length(self, key_length):
for curve in self.ecdsa_curves:
if curve.key_length == key_length:
return curve
class ECDSAKey(PKey):
"""
Representation of an ECDSA key which can be used to sign and verify SSH2
data.
"""
_ECDSA_CURVES = _ECDSACurveSet(
[
_ECDSACurve(ec.SECP256R1, "nistp256"),
_ECDSACurve(ec.SECP384R1, "nistp384"),
_ECDSACurve(ec.SECP521R1, "nistp521"),
]
)
def __init__(
self,
msg=None,
data=None,
filename=None,
password=None,
vals=None,
file_obj=None,
# TODO 4.0: remove; it does nothing since porting to cryptography.io
validate_point=True,
):
self.verifying_key = None
self.signing_key = None
self.public_blob = None
if file_obj is not None:
self._from_private_key(file_obj, password)
return
if filename is not None:
self._from_private_key_file(filename, password)
return
if (msg is None) and (data is not None):
msg = Message(data)
if vals is not None:
self.signing_key, self.verifying_key = vals
c_class = self.signing_key.curve.__class__
self.ecdsa_curve = self._ECDSA_CURVES.get_by_curve_class(c_class)
else:
# Must set ecdsa_curve first; subroutines called herein may need to
# spit out our get_name(), which relies on this.
key_type = msg.get_text()
# But this also means we need to hand it a real key/curve
# identifier, so strip out any cert business. (NOTE: could push
# that into _ECDSACurveSet.get_by_key_format_identifier(), but it
# feels more correct to do it here?)
suffix = "-cert-v01@openssh.com"
if key_type.endswith(suffix):
key_type = key_type[: -len(suffix)]
self.ecdsa_curve = self._ECDSA_CURVES.get_by_key_format_identifier(
key_type
)
key_types = self._ECDSA_CURVES.get_key_format_identifier_list()
cert_types = [
"{}-cert-v01@openssh.com".format(x) for x in key_types
]
self._check_type_and_load_cert(
msg=msg, key_type=key_types, cert_type=cert_types
)
curvename = msg.get_text()
if curvename != self.ecdsa_curve.nist_name:
raise SSHException(
"Can't handle curve of type {}".format(curvename)
)
pointinfo = msg.get_binary()
try:
key = ec.EllipticCurvePublicKey.from_encoded_point(
self.ecdsa_curve.curve_class(), pointinfo
)
self.verifying_key = key
except ValueError:
raise SSHException("Invalid public key")
@classmethod
def identifiers(cls):
return cls._ECDSA_CURVES.get_key_format_identifier_list()
# TODO 4.0: deprecate/remove
@classmethod
def supported_key_format_identifiers(cls):
return cls.identifiers()
def asbytes(self):
key = self.verifying_key
m = Message()
m.add_string(self.ecdsa_curve.key_format_identifier)
m.add_string(self.ecdsa_curve.nist_name)
numbers = key.public_numbers()
key_size_bytes = (key.curve.key_size + 7) // 8
x_bytes = deflate_long(numbers.x, add_sign_padding=False)
x_bytes = b"\x00" * (key_size_bytes - len(x_bytes)) + x_bytes
y_bytes = deflate_long(numbers.y, add_sign_padding=False)
y_bytes = b"\x00" * (key_size_bytes - len(y_bytes)) + y_bytes
point_str = four_byte + x_bytes + y_bytes
m.add_string(point_str)
return m.asbytes()
def __str__(self):
return self.asbytes()
@property
def _fields(self):
return (
self.get_name(),
self.verifying_key.public_numbers().x,
self.verifying_key.public_numbers().y,
)
def get_name(self):
return self.ecdsa_curve.key_format_identifier
def get_bits(self):
return self.ecdsa_curve.key_length
def can_sign(self):
return self.signing_key is not None
def sign_ssh_data(self, data, algorithm=None):
ecdsa = ec.ECDSA(self.ecdsa_curve.hash_object())
sig = self.signing_key.sign(data, ecdsa)
r, s = decode_dss_signature(sig)
m = Message()
m.add_string(self.ecdsa_curve.key_format_identifier)
m.add_string(self._sigencode(r, s))
return m
def verify_ssh_sig(self, data, msg):
if msg.get_text() != self.ecdsa_curve.key_format_identifier:
return False
sig = msg.get_binary()
sigR, sigS = self._sigdecode(sig)
signature = encode_dss_signature(sigR, sigS)
try:
self.verifying_key.verify(
signature, data, ec.ECDSA(self.ecdsa_curve.hash_object())
)
except InvalidSignature:
return False
else:
return True
def write_private_key_file(self, filename, password=None):
self._write_private_key_file(
filename,
self.signing_key,
serialization.PrivateFormat.TraditionalOpenSSL,
password=password,
)
def write_private_key(self, file_obj, password=None):
self._write_private_key(
file_obj,
self.signing_key,
serialization.PrivateFormat.TraditionalOpenSSL,
password=password,
)
@classmethod
def generate(cls, curve=ec.SECP256R1(), progress_func=None, bits=None):
"""
Generate a new private ECDSA key. This factory function can be used to
generate a new host key or authentication key.
:param progress_func: Not used for this type of key.
:returns: A new private key (`.ECDSAKey`) object
"""
if bits is not None:
curve = cls._ECDSA_CURVES.get_by_key_length(bits)
if curve is None:
raise ValueError("Unsupported key length: {:d}".format(bits))
curve = curve.curve_class()
private_key = ec.generate_private_key(curve, backend=default_backend())
return ECDSAKey(vals=(private_key, private_key.public_key()))
# ...internals...
def _from_private_key_file(self, filename, password):
data = self._read_private_key_file("EC", filename, password)
self._decode_key(data)
def _from_private_key(self, file_obj, password):
data = self._read_private_key("EC", file_obj, password)
self._decode_key(data)
def _decode_key(self, data):
pkformat, data = data
if pkformat == self._PRIVATE_KEY_FORMAT_ORIGINAL:
try:
key = serialization.load_der_private_key(
data, password=None, backend=default_backend()
)
except (
ValueError,
AssertionError,
TypeError,
UnsupportedAlgorithm,
) as e:
raise SSHException(str(e))
elif pkformat == self._PRIVATE_KEY_FORMAT_OPENSSH:
try:
msg = Message(data)
curve_name = msg.get_text()
verkey = msg.get_binary() # noqa: F841
sigkey = msg.get_mpint()
name = "ecdsa-sha2-" + curve_name
curve = self._ECDSA_CURVES.get_by_key_format_identifier(name)
if not curve:
raise SSHException("Invalid key curve identifier")
key = ec.derive_private_key(
sigkey, curve.curve_class(), default_backend()
)
except Exception as e:
# PKey._read_private_key_openssh() should check or return
# keytype - parsing could fail for any reason due to wrong type
raise SSHException(str(e))
else:
self._got_bad_key_format_id(pkformat)
self.signing_key = key
self.verifying_key = key.public_key()
curve_class = key.curve.__class__
self.ecdsa_curve = self._ECDSA_CURVES.get_by_curve_class(curve_class)
def _sigencode(self, r, s):
msg = Message()
msg.add_mpint(r)
msg.add_mpint(s)
return msg.asbytes()
def _sigdecode(self, sig):
msg = Message(sig)
r = msg.get_mpint()
s = msg.get_mpint()
return r, s
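# ---------------------------------------------------------------------------
# Editor's illustration (not part of the committed module): a minimal sketch
# of the ECDSAKey API defined above -- generate a P-256 key, sign some data,
# and verify the signature round-trips. The names (demo_key, signature) are
# ours, chosen only for the demo.
if __name__ == "__main__":
    demo_key = ECDSAKey.generate()  # defaults to SECP256R1 / nistp256
    assert demo_key.get_name() == "ecdsa-sha2-nistp256"
    signature = demo_key.sign_ssh_data(b"example payload")  # a packed Message
    signature.rewind()  # rewind so verify_ssh_sig() can re-read the fields
    assert demo_key.verify_ssh_sig(b"example payload", signature)
    print("ECDSA sign/verify round trip OK")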

View File

@ -0,0 +1,212 @@
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import bcrypt
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher
import nacl.signing
from paramiko.message import Message
from paramiko.pkey import PKey, OPENSSH_AUTH_MAGIC, _unpad_openssh
from paramiko.util import b
from paramiko.ssh_exception import SSHException, PasswordRequiredException
class Ed25519Key(PKey):
"""
Representation of an `Ed25519 <https://ed25519.cr.yp.to/>`_ key.
.. note::
Ed25519 key support was added to OpenSSH in version 6.5.
.. versionadded:: 2.2
.. versionchanged:: 2.3
Added a ``file_obj`` parameter to match other key classes.
"""
name = "ssh-ed25519"
def __init__(
self, msg=None, data=None, filename=None, password=None, file_obj=None
):
self.public_blob = None
verifying_key = signing_key = None
if msg is None and data is not None:
msg = Message(data)
if msg is not None:
self._check_type_and_load_cert(
msg=msg,
key_type=self.name,
cert_type="ssh-ed25519-cert-v01@openssh.com",
)
verifying_key = nacl.signing.VerifyKey(msg.get_binary())
elif filename is not None:
with open(filename, "r") as f:
pkformat, data = self._read_private_key("OPENSSH", f)
elif file_obj is not None:
pkformat, data = self._read_private_key("OPENSSH", file_obj)
if filename or file_obj:
signing_key = self._parse_signing_key_data(data, password)
if signing_key is None and verifying_key is None:
raise ValueError("need a key")
self._signing_key = signing_key
self._verifying_key = verifying_key
def _parse_signing_key_data(self, data, password):
from paramiko.transport import Transport
# We may eventually want this to be usable for other key types, as
# OpenSSH moves to it, but for now this is just for Ed25519 keys.
# This format is described here:
# https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
# The description isn't totally complete, and I had to refer to the
# source for a full implementation.
message = Message(data)
if message.get_bytes(len(OPENSSH_AUTH_MAGIC)) != OPENSSH_AUTH_MAGIC:
raise SSHException("Invalid key")
ciphername = message.get_text()
kdfname = message.get_text()
kdfoptions = message.get_binary()
num_keys = message.get_int()
if kdfname == "none":
# kdfname of "none" must have an empty kdfoptions, the ciphername
# must be "none"
if kdfoptions or ciphername != "none":
raise SSHException("Invalid key")
elif kdfname == "bcrypt":
if not password:
raise PasswordRequiredException(
"Private key file is encrypted"
)
kdf = Message(kdfoptions)
bcrypt_salt = kdf.get_binary()
bcrypt_rounds = kdf.get_int()
else:
raise SSHException("Invalid key")
if ciphername != "none" and ciphername not in Transport._cipher_info:
raise SSHException("Invalid key")
public_keys = []
for _ in range(num_keys):
pubkey = Message(message.get_binary())
if pubkey.get_text() != self.name:
raise SSHException("Invalid key")
public_keys.append(pubkey.get_binary())
private_ciphertext = message.get_binary()
if ciphername == "none":
private_data = private_ciphertext
else:
cipher = Transport._cipher_info[ciphername]
key = bcrypt.kdf(
password=b(password),
salt=bcrypt_salt,
desired_key_bytes=cipher["key-size"] + cipher["block-size"],
rounds=bcrypt_rounds,
# We can't control how many rounds are on disk, so no sense
# warning about it.
ignore_few_rounds=True,
)
decryptor = Cipher(
cipher["class"](key[: cipher["key-size"]]),
cipher["mode"](key[cipher["key-size"] :]),
backend=default_backend(),
).decryptor()
private_data = (
decryptor.update(private_ciphertext) + decryptor.finalize()
)
message = Message(_unpad_openssh(private_data))
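        # Two "check" integers open the decrypted private section; they must
        # be equal, otherwise the passphrase (and so the decryption above)
        # was almost certainly wrong.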
if message.get_int() != message.get_int():
raise SSHException("Invalid key")
signing_keys = []
for i in range(num_keys):
if message.get_text() != self.name:
raise SSHException("Invalid key")
# A copy of the public key, again, ignore.
public = message.get_binary()
key_data = message.get_binary()
# The second half of the key data is yet another copy of the public
# key...
signing_key = nacl.signing.SigningKey(key_data[:32])
# Verify that all the public keys are the same...
assert (
signing_key.verify_key.encode()
== public
== public_keys[i]
== key_data[32:]
)
signing_keys.append(signing_key)
# Comment, ignore.
message.get_binary()
if len(signing_keys) != 1:
raise SSHException("Invalid key")
return signing_keys[0]
def asbytes(self):
if self.can_sign():
v = self._signing_key.verify_key
else:
v = self._verifying_key
m = Message()
m.add_string(self.name)
m.add_string(v.encode())
return m.asbytes()
@property
def _fields(self):
if self.can_sign():
v = self._signing_key.verify_key
else:
v = self._verifying_key
return (self.get_name(), v)
# TODO 4.0: remove
def get_name(self):
return self.name
def get_bits(self):
return 256
def can_sign(self):
return self._signing_key is not None
def sign_ssh_data(self, data, algorithm=None):
m = Message()
m.add_string(self.name)
m.add_string(self._signing_key.sign(data).signature)
return m
def verify_ssh_sig(self, data, msg):
if msg.get_text() != self.name:
return False
try:
self._verifying_key.verify(data, msg.get_binary())
except nacl.exceptions.BadSignatureError:
return False
else:
return True
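# ---------------------------------------------------------------------------
# Editor's illustration (not part of the committed module): a hedged sketch
# of loading a public-only Ed25519Key from an SSH-encoded blob and verifying
# a signature made with the matching PyNaCl signing key. Variable names are
# ours; only classes already imported above are used.
if __name__ == "__main__":
    sk = nacl.signing.SigningKey.generate()
    blob = Message()
    blob.add_string("ssh-ed25519")
    blob.add_string(sk.verify_key.encode())
    pub = Ed25519Key(data=blob.asbytes())
    assert pub.get_name() == "ssh-ed25519" and not pub.can_sign()
    sig = Message()
    sig.add_string("ssh-ed25519")
    sig.add_string(sk.sign(b"demo").signature)
    sig.rewind()  # verify_ssh_sig() re-reads the name and signature fields
    assert pub.verify_ssh_sig(b"demo", sig)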

View File

@ -0,0 +1,528 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from io import BytesIO
from paramiko.common import (
linefeed_byte_value,
crlf,
cr_byte,
linefeed_byte,
cr_byte_value,
)
from paramiko.util import ClosingContextManager, u
class BufferedFile(ClosingContextManager):
"""
Reusable base class to implement Python-style file buffering around a
simpler stream.
"""
_DEFAULT_BUFSIZE = 8192
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
FLAG_READ = 0x1
FLAG_WRITE = 0x2
FLAG_APPEND = 0x4
FLAG_BINARY = 0x10
FLAG_BUFFERED = 0x20
FLAG_LINE_BUFFERED = 0x40
FLAG_UNIVERSAL_NEWLINE = 0x80
def __init__(self):
self.newlines = None
self._flags = 0
self._bufsize = self._DEFAULT_BUFSIZE
self._wbuffer = BytesIO()
self._rbuffer = bytes()
self._at_trailing_cr = False
self._closed = False
# pos - position within the file, according to the user
# realpos - position according the OS
# (these may be different because we buffer for line reading)
self._pos = self._realpos = 0
# size only matters for seekable files
self._size = 0
def __del__(self):
self.close()
def __iter__(self):
"""
Returns an iterator that can be used to iterate over the lines in this
file. This iterator happens to return the file itself, since a file is
its own iterator.
:raises: ``ValueError`` -- if the file is closed.
"""
if self._closed:
raise ValueError("I/O operation on closed file")
return self
def close(self):
"""
Close the file. Future read and write operations will fail.
"""
self.flush()
self._closed = True
def flush(self):
"""
Write out any data in the write buffer. This may do nothing if write
buffering is not turned on.
"""
self._write_all(self._wbuffer.getvalue())
self._wbuffer = BytesIO()
return
def __next__(self):
"""
Returns the next line from the input, or raises ``StopIteration``
when EOF is hit. Unlike python file objects, it's okay to mix
calls to `.next` and `.readline`.
:raises: ``StopIteration`` -- when the end of the file is reached.
:returns:
a line (`str`, or `bytes` if the file was opened in binary mode)
read from the file.
"""
line = self.readline()
if not line:
raise StopIteration
return line
def readable(self):
"""
Check if the file can be read from.
:returns:
`True` if the file can be read from. If `False`, `read` will raise
an exception.
"""
return (self._flags & self.FLAG_READ) == self.FLAG_READ
def writable(self):
"""
Check if the file can be written to.
:returns:
`True` if the file can be written to. If `False`, `write` will
raise an exception.
"""
return (self._flags & self.FLAG_WRITE) == self.FLAG_WRITE
def seekable(self):
"""
Check if the file supports random access.
:returns:
`True` if the file supports random access. If `False`, `seek` will
raise an exception.
"""
return False
def readinto(self, buff):
"""
Read up to ``len(buff)`` bytes into ``bytearray`` *buff* and return the
number of bytes read.
:returns:
The number of bytes read.
"""
data = self.read(len(buff))
buff[: len(data)] = data
return len(data)
def read(self, size=None):
"""
Read at most ``size`` bytes from the file (less if we hit the end of
the file first). If the ``size`` argument is negative or omitted,
read all the remaining data in the file.
.. note::
``'b'`` mode flag is ignored (``self.FLAG_BINARY`` in
``self._flags``), because SSH treats all files as binary, since we
have no idea what encoding the file is in, or even if the file is
text data.
:param int size: maximum number of bytes to read
:returns:
data read from the file (as bytes), or an empty string if EOF was
encountered immediately
"""
if self._closed:
raise IOError("File is closed")
if not (self._flags & self.FLAG_READ):
raise IOError("File is not open for reading")
if (size is None) or (size < 0):
# go for broke
result = bytearray(self._rbuffer)
self._rbuffer = bytes()
self._pos += len(result)
while True:
try:
new_data = self._read(self._DEFAULT_BUFSIZE)
except EOFError:
new_data = None
if (new_data is None) or (len(new_data) == 0):
break
result.extend(new_data)
self._realpos += len(new_data)
self._pos += len(new_data)
return bytes(result)
if size <= len(self._rbuffer):
result = self._rbuffer[:size]
self._rbuffer = self._rbuffer[size:]
self._pos += len(result)
return result
while len(self._rbuffer) < size:
read_size = size - len(self._rbuffer)
if self._flags & self.FLAG_BUFFERED:
read_size = max(self._bufsize, read_size)
try:
new_data = self._read(read_size)
except EOFError:
new_data = None
if (new_data is None) or (len(new_data) == 0):
break
self._rbuffer += new_data
self._realpos += len(new_data)
result = self._rbuffer[:size]
self._rbuffer = self._rbuffer[size:]
self._pos += len(result)
return result
def readline(self, size=None):
"""
Read one entire line from the file. A trailing newline character is
kept in the string (but may be absent when a file ends with an
incomplete line). If the size argument is present and non-negative, it
is a maximum byte count (including the trailing newline) and an
incomplete line may be returned. An empty string is returned only when
EOF is encountered immediately.
.. note::
Unlike stdio's ``fgets``, the returned string contains null
characters (``'\\0'``) if they occurred in the input.
:param int size: maximum length of returned string.
:returns:
next line of the file, or an empty string if the end of the
file has been reached.
If the file was opened in binary (``'b'``) mode: bytes are returned
Else: the encoding of the file is assumed to be UTF-8 and character
strings (`str`) are returned
"""
# it's almost silly how complex this function is.
if self._closed:
raise IOError("File is closed")
if not (self._flags & self.FLAG_READ):
raise IOError("File not open for reading")
line = self._rbuffer
truncated = False
while True:
if (
self._at_trailing_cr
and self._flags & self.FLAG_UNIVERSAL_NEWLINE
and len(line) > 0
):
# edge case: the newline may be '\r\n' and we may have read
# only the first '\r' last time.
if line[0] == linefeed_byte_value:
line = line[1:]
self._record_newline(crlf)
else:
self._record_newline(cr_byte)
self._at_trailing_cr = False
# check size before looking for a linefeed, in case we already have
# enough.
if (size is not None) and (size >= 0):
if len(line) >= size:
# truncate line
self._rbuffer = line[size:]
line = line[:size]
truncated = True
break
n = size - len(line)
else:
n = self._bufsize
if linefeed_byte in line or (
self._flags & self.FLAG_UNIVERSAL_NEWLINE and cr_byte in line
):
break
try:
new_data = self._read(n)
except EOFError:
new_data = None
if (new_data is None) or (len(new_data) == 0):
self._rbuffer = bytes()
self._pos += len(line)
return line if self._flags & self.FLAG_BINARY else u(line)
line += new_data
self._realpos += len(new_data)
# find the newline
pos = line.find(linefeed_byte)
if self._flags & self.FLAG_UNIVERSAL_NEWLINE:
rpos = line.find(cr_byte)
if (rpos >= 0) and (rpos < pos or pos < 0):
pos = rpos
if pos == -1:
# we couldn't find a newline in the truncated string, return it
self._pos += len(line)
return line if self._flags & self.FLAG_BINARY else u(line)
xpos = pos + 1
if (
line[pos] == cr_byte_value
and xpos < len(line)
and line[xpos] == linefeed_byte_value
):
xpos += 1
# if the string was truncated, _rbuffer needs to have the string after
# the newline character plus the truncated part of the line we stored
# earlier in _rbuffer
if truncated:
self._rbuffer = line[xpos:] + self._rbuffer
else:
self._rbuffer = line[xpos:]
lf = line[pos:xpos]
line = line[:pos] + linefeed_byte
if (len(self._rbuffer) == 0) and (lf == cr_byte):
# we could read the line up to a '\r' and there could still be a
# '\n' following that we read next time. note that and eat it.
self._at_trailing_cr = True
else:
self._record_newline(lf)
self._pos += len(line)
return line if self._flags & self.FLAG_BINARY else u(line)
def readlines(self, sizehint=None):
"""
Read all remaining lines using `readline` and return them as a list.
If the optional ``sizehint`` argument is present, instead of reading up
to EOF, whole lines totalling approximately sizehint bytes (possibly
after rounding up to an internal buffer size) are read.
:param int sizehint: desired maximum number of bytes to read.
:returns: list of lines read from the file.
"""
lines = []
byte_count = 0
while True:
line = self.readline()
if len(line) == 0:
break
lines.append(line)
byte_count += len(line)
if (sizehint is not None) and (byte_count >= sizehint):
break
return lines
def seek(self, offset, whence=0):
"""
Set the file's current position, like stdio's ``fseek``. Not all file
objects support seeking.
.. note::
If a file is opened in append mode (``'a'`` or ``'a+'``), any seek
operations will be undone at the next write (as the file position
will move back to the end of the file).
:param int offset:
position to move to within the file, relative to ``whence``.
:param int whence:
type of movement: 0 = absolute; 1 = relative to the current
position; 2 = relative to the end of the file.
:raises: ``IOError`` -- if the file doesn't support random access.
"""
raise IOError("File does not support seeking.")
def tell(self):
"""
Return the file's current position. This may not be accurate or
useful if the underlying file doesn't support random access, or was
opened in append mode.
:returns: file position (`number <int>` of bytes).
"""
return self._pos
def write(self, data):
"""
Write data to the file. If write buffering is on (``bufsize`` was
specified and non-zero), some or all of the data may not actually be
written yet. (Use `flush` or `close` to force buffered data to be
written out.)
:param data: ``str``/``bytes`` data to write
"""
if isinstance(data, str):
# Accept text and encode as utf-8 for compatibility only.
data = data.encode("utf-8")
if self._closed:
raise IOError("File is closed")
if not (self._flags & self.FLAG_WRITE):
raise IOError("File not open for writing")
if not (self._flags & self.FLAG_BUFFERED):
self._write_all(data)
return
self._wbuffer.write(data)
if self._flags & self.FLAG_LINE_BUFFERED:
# only scan the new data for linefeed, to avoid wasting time.
last_newline_pos = data.rfind(linefeed_byte)
if last_newline_pos >= 0:
wbuf = self._wbuffer.getvalue()
last_newline_pos += len(wbuf) - len(data)
self._write_all(wbuf[: last_newline_pos + 1])
self._wbuffer = BytesIO()
self._wbuffer.write(wbuf[last_newline_pos + 1 :])
return
# even if we're line buffering, if the buffer has grown past the
# buffer size, force a flush.
if self._wbuffer.tell() >= self._bufsize:
self.flush()
return
def writelines(self, sequence):
"""
Write a sequence of strings to the file. The sequence can be any
iterable object producing strings, typically a list of strings. (The
name is intended to match `readlines`; `writelines` does not add line
separators.)
:param sequence: an iterable sequence of strings.
"""
for line in sequence:
self.write(line)
return
def xreadlines(self):
"""
Identical to ``iter(f)``. This is a deprecated file interface that
predates Python iterator support.
"""
return self
@property
def closed(self):
return self._closed
# ...overrides...
def _read(self, size):
"""
(subclass override)
Read data from the stream. Return ``None`` or raise ``EOFError`` to
indicate EOF.
"""
raise EOFError()
def _write(self, data):
"""
(subclass override)
Write data into the stream.
"""
raise IOError("write not implemented")
def _get_size(self):
"""
(subclass override)
Return the size of the file. This is called from within `_set_mode`
if the file is opened in append mode, so the file position can be
tracked and `seek` and `tell` will work correctly. If the file is
a stream that can't be randomly accessed, you don't need to override
this method,
"""
return 0
# ...internals...
def _set_mode(self, mode="r", bufsize=-1):
"""
Subclasses call this method to initialize the BufferedFile.
"""
# set bufsize in any event, because it's used for readline().
self._bufsize = self._DEFAULT_BUFSIZE
if bufsize < 0:
# do no buffering by default, because otherwise writes will get
# buffered in a way that will probably confuse people.
bufsize = 0
if bufsize == 1:
# apparently, line buffering only affects writes. reads are only
# buffered if you call readline (directly or indirectly: iterating
# over a file will indirectly call readline).
self._flags |= self.FLAG_BUFFERED | self.FLAG_LINE_BUFFERED
elif bufsize > 1:
self._bufsize = bufsize
self._flags |= self.FLAG_BUFFERED
self._flags &= ~self.FLAG_LINE_BUFFERED
elif bufsize == 0:
# unbuffered
self._flags &= ~(self.FLAG_BUFFERED | self.FLAG_LINE_BUFFERED)
if ("r" in mode) or ("+" in mode):
self._flags |= self.FLAG_READ
if ("w" in mode) or ("+" in mode):
self._flags |= self.FLAG_WRITE
if "a" in mode:
self._flags |= self.FLAG_WRITE | self.FLAG_APPEND
self._size = self._get_size()
self._pos = self._realpos = self._size
if "b" in mode:
self._flags |= self.FLAG_BINARY
if "U" in mode:
self._flags |= self.FLAG_UNIVERSAL_NEWLINE
# built-in file objects have this attribute to store which kinds of
# line terminations they've seen:
# <http://www.python.org/doc/current/lib/built-in-funcs.html>
self.newlines = None
def _write_all(self, raw_data):
# the underlying stream may be something that does partial writes (like
# a socket).
data = memoryview(raw_data)
while len(data) > 0:
count = self._write(data)
data = data[count:]
if self._flags & self.FLAG_APPEND:
self._size += count
self._pos = self._realpos = self._size
else:
self._pos += count
self._realpos += count
return None
def _record_newline(self, newline):
# silliness about tracking what kinds of newlines we've seen.
# i don't understand why it can be None, a string, or a tuple, instead
# of just always being a tuple, but we'll emulate that behavior anyway.
if not (self._flags & self.FLAG_UNIVERSAL_NEWLINE):
return
if self.newlines is None:
self.newlines = newline
elif self.newlines != newline and isinstance(self.newlines, bytes):
self.newlines = (self.newlines, newline)
elif newline not in self.newlines:
self.newlines += (newline,)
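# ---------------------------------------------------------------------------
# Editor's illustration (not part of the committed module): the smallest
# useful BufferedFile subclass -- an in-memory, read-only stream -- used to
# exercise readline()'s universal-newline handling implemented above.
if __name__ == "__main__":

    class _DemoFile(BufferedFile):
        def __init__(self, data):
            super().__init__()
            self._data = data
            self._set_mode("rU")  # readable, universal newlines, text mode

        def _read(self, size):
            if not self._data:
                return None  # None signals EOF to the base class
            chunk, self._data = self._data[:size], self._data[size:]
            return chunk

    f = _DemoFile(b"one\r\ntwo\nthree")
    assert [f.readline() for _ in range(3)] == ["one\n", "two\n", "three"]
    assert f.newlines == (b"\r\n", b"\n")  # both newline styles were seen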

View File

@ -0,0 +1,384 @@
# Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from base64 import encodebytes, decodebytes
import binascii
import os
import re
from collections.abc import MutableMapping
from hashlib import sha1
from hmac import HMAC
from paramiko.pkey import PKey, UnknownKeyType
from paramiko.util import get_logger, constant_time_bytes_eq, b, u
from paramiko.ssh_exception import SSHException
class HostKeys(MutableMapping):
"""
Representation of an OpenSSH-style "known hosts" file. Host keys can be
read from one or more files, and then individual hosts can be looked up to
verify server keys during SSH negotiation.
A `.HostKeys` object can be treated like a dict; any dict lookup is
equivalent to calling `lookup`.
.. versionadded:: 1.5.3
"""
def __init__(self, filename=None):
"""
Create a new HostKeys object, optionally loading keys from an OpenSSH
style host-key file.
:param str filename: filename to load host keys from, or ``None``
"""
# emulate a dict of { hostname: { keytype: PKey } }
self._entries = []
if filename is not None:
self.load(filename)
def add(self, hostname, keytype, key):
"""
Add a host key entry to the table. Any existing entry for a
``(hostname, keytype)`` pair will be replaced.
:param str hostname: the hostname (or IP) to add
:param str keytype: key type (``"ssh-rsa"`` or ``"ssh-dss"``)
:param .PKey key: the key to add
"""
for e in self._entries:
if (hostname in e.hostnames) and (e.key.get_name() == keytype):
e.key = key
return
self._entries.append(HostKeyEntry([hostname], key))
def load(self, filename):
"""
Read a file of known SSH host keys, in the format used by OpenSSH.
This type of file unfortunately doesn't exist on Windows, but on
posix, it will usually be stored in
``os.path.expanduser("~/.ssh/known_hosts")``.
If this method is called multiple times, the host keys are merged,
not cleared. So multiple calls to `load` will just call `add`,
replacing any existing entries and adding new ones.
:param str filename: name of the file to read host keys from
:raises: ``IOError`` -- if there was an error reading the file
"""
with open(filename, "r") as f:
for lineno, line in enumerate(f, 1):
line = line.strip()
if (len(line) == 0) or (line[0] == "#"):
continue
try:
entry = HostKeyEntry.from_line(line, lineno)
except SSHException:
continue
if entry is not None:
_hostnames = entry.hostnames
for h in _hostnames:
if self.check(h, entry.key):
entry.hostnames.remove(h)
if len(entry.hostnames):
self._entries.append(entry)
def save(self, filename):
"""
Save host keys into a file, in the format used by OpenSSH. The order
of keys in the file will be preserved when possible (if these keys were
loaded from a file originally). The single exception is that combined
lines will be split into individual key lines, which is arguably a bug.
:param str filename: name of the file to write
:raises: ``IOError`` -- if there was an error writing the file
.. versionadded:: 1.6.1
"""
with open(filename, "w") as f:
for e in self._entries:
line = e.to_line()
if line:
f.write(line)
def lookup(self, hostname):
"""
Find a hostkey entry for a given hostname or IP. If no entry is found,
``None`` is returned. Otherwise a dictionary of keytype to key is
returned. The keytype will be either ``"ssh-rsa"`` or ``"ssh-dss"``.
:param str hostname: the hostname (or IP) to lookup
:return: dict of `str` -> `.PKey` keys associated with this host
(or ``None``)
"""
class SubDict(MutableMapping):
def __init__(self, hostname, entries, hostkeys):
self._hostname = hostname
self._entries = entries
self._hostkeys = hostkeys
def __iter__(self):
for k in self.keys():
yield k
def __len__(self):
return len(self.keys())
def __delitem__(self, key):
for e in list(self._entries):
if e.key.get_name() == key:
self._entries.remove(e)
break
else:
raise KeyError(key)
def __getitem__(self, key):
for e in self._entries:
if e.key.get_name() == key:
return e.key
raise KeyError(key)
def __setitem__(self, key, val):
for e in self._entries:
if e.key is None:
continue
if e.key.get_name() == key:
# replace
e.key = val
break
else:
# add a new one
e = HostKeyEntry([hostname], val)
self._entries.append(e)
self._hostkeys._entries.append(e)
def keys(self):
return [
e.key.get_name()
for e in self._entries
if e.key is not None
]
entries = []
for e in self._entries:
if self._hostname_matches(hostname, e):
entries.append(e)
if len(entries) == 0:
return None
return SubDict(hostname, entries, self)
def _hostname_matches(self, hostname, entry):
"""
Tests whether ``hostname`` string matches given SubDict ``entry``.
:returns bool:
"""
for h in entry.hostnames:
if (
h == hostname
or h.startswith("|1|")
and not hostname.startswith("|1|")
and constant_time_bytes_eq(self.hash_host(hostname, h), h)
):
return True
return False
def check(self, hostname, key):
"""
Return True if the given key is associated with the given hostname
in this dictionary.
:param str hostname: hostname (or IP) of the SSH server
:param .PKey key: the key to check
:return:
``True`` if the key is associated with the hostname; else ``False``
"""
k = self.lookup(hostname)
if k is None:
return False
host_key = k.get(key.get_name(), None)
if host_key is None:
return False
return host_key.asbytes() == key.asbytes()
def clear(self):
"""
Remove all host keys from the dictionary.
"""
self._entries = []
def __iter__(self):
for k in self.keys():
yield k
def __len__(self):
return len(self.keys())
def __getitem__(self, key):
ret = self.lookup(key)
if ret is None:
raise KeyError(key)
return ret
def __delitem__(self, key):
index = None
for i, entry in enumerate(self._entries):
if self._hostname_matches(key, entry):
index = i
break
if index is None:
raise KeyError(key)
self._entries.pop(index)
def __setitem__(self, hostname, entry):
# don't use this please.
if len(entry) == 0:
self._entries.append(HostKeyEntry([hostname], None))
return
for key_type in entry.keys():
found = False
for e in self._entries:
if (hostname in e.hostnames) and e.key.get_name() == key_type:
# replace
e.key = entry[key_type]
found = True
if not found:
self._entries.append(HostKeyEntry([hostname], entry[key_type]))
def keys(self):
ret = []
for e in self._entries:
for h in e.hostnames:
if h not in ret:
ret.append(h)
return ret
def values(self):
ret = []
for k in self.keys():
ret.append(self.lookup(k))
return ret
@staticmethod
def hash_host(hostname, salt=None):
"""
Return a "hashed" form of the hostname, as used by OpenSSH when storing
hashed hostnames in the known_hosts file.
:param str hostname: the hostname to hash
:param str salt: optional salt to use when hashing
(must be 20 bytes long)
:return: the hashed hostname as a `str`
"""
if salt is None:
salt = os.urandom(sha1().digest_size)
else:
if salt.startswith("|1|"):
salt = salt.split("|")[2]
salt = decodebytes(b(salt))
assert len(salt) == sha1().digest_size
hmac = HMAC(salt, b(hostname), sha1).digest()
hostkey = "|1|{}|{}".format(u(encodebytes(salt)), u(encodebytes(hmac)))
return hostkey.replace("\n", "")
class InvalidHostKey(Exception):
def __init__(self, line, exc):
self.line = line
self.exc = exc
self.args = (line, exc)
class HostKeyEntry:
"""
Representation of a line in an OpenSSH-style "known hosts" file.
"""
def __init__(self, hostnames=None, key=None):
self.valid = (hostnames is not None) and (key is not None)
self.hostnames = hostnames
self.key = key
@classmethod
def from_line(cls, line, lineno=None):
"""
Parses the given line of text to find the names for the host,
the type of key, and the key data. The line is expected to be in the
format used by the OpenSSH known_hosts file. Fields are separated by a
single space or tab.
Lines are expected to not have leading or trailing whitespace.
We don't bother to check for comments or empty lines. All of
that should be taken care of before sending the line to us.
:param str line: a line from an OpenSSH known_hosts file
"""
log = get_logger("paramiko.hostkeys")
fields = re.split(" |\t", line)
if len(fields) < 3:
# Bad number of fields
msg = "Not enough fields found in known_hosts in line {} ({!r})"
log.info(msg.format(lineno, line))
return None
fields = fields[:3]
names, key_type, key = fields
names = names.split(",")
# Decide what kind of key we're looking at and create an object
# to hold it accordingly.
try:
# TODO: this grew organically and doesn't seem /wrong/ per se (file
# read -> unicode str -> bytes for base64 decode -> decoded bytes);
# but in Python 3 forever land, can we simply use
# `base64.b64decode(str-from-file)` here?
key_bytes = decodebytes(b(key))
except binascii.Error as e:
raise InvalidHostKey(line, e)
try:
return cls(names, PKey.from_type_string(key_type, key_bytes))
except UnknownKeyType:
# TODO 4.0: consider changing HostKeys API so this just raises
# naturally and the exception is muted higher up in the stack?
log.info("Unable to handle key of type {}".format(key_type))
return None
def to_line(self):
"""
Returns a string in OpenSSH known_hosts file format, or None if
the object is not in a valid state. A trailing newline is
included.
"""
if self.valid:
return "{} {} {}\n".format(
",".join(self.hostnames),
self.key.get_name(),
self.key.get_base64(),
)
return None
def __repr__(self):
return "<HostKeyEntry {!r}: {!r}>".format(self.hostnames, self.key)

View File

@ -0,0 +1,131 @@
import binascii
import hashlib
from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat.primitives import constant_time, serialization
from cryptography.hazmat.primitives.asymmetric.x25519 import (
X25519PrivateKey,
X25519PublicKey,
)
from paramiko.message import Message
from paramiko.common import byte_chr
from paramiko.ssh_exception import SSHException
_MSG_KEXECDH_INIT, _MSG_KEXECDH_REPLY = range(30, 32)
c_MSG_KEXECDH_INIT, c_MSG_KEXECDH_REPLY = [byte_chr(c) for c in range(30, 32)]
class KexCurve25519:
hash_algo = hashlib.sha256
def __init__(self, transport):
self.transport = transport
self.key = None
@classmethod
def is_available(cls):
try:
X25519PrivateKey.generate()
except UnsupportedAlgorithm:
return False
else:
return True
def _perform_exchange(self, peer_key):
secret = self.key.exchange(peer_key)
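        # RFC 8731 requires aborting when the shared secret is all zeros,
        # which is what a low-order peer public value produces.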
if constant_time.bytes_eq(secret, b"\x00" * 32):
raise SSHException(
"peer's curve25519 public value has wrong order"
)
return secret
def start_kex(self):
self.key = X25519PrivateKey.generate()
if self.transport.server_mode:
self.transport._expect_packet(_MSG_KEXECDH_INIT)
return
m = Message()
m.add_byte(c_MSG_KEXECDH_INIT)
m.add_string(
self.key.public_key().public_bytes(
serialization.Encoding.Raw, serialization.PublicFormat.Raw
)
)
self.transport._send_message(m)
self.transport._expect_packet(_MSG_KEXECDH_REPLY)
def parse_next(self, ptype, m):
if self.transport.server_mode and (ptype == _MSG_KEXECDH_INIT):
return self._parse_kexecdh_init(m)
elif not self.transport.server_mode and (ptype == _MSG_KEXECDH_REPLY):
return self._parse_kexecdh_reply(m)
raise SSHException(
"KexCurve25519 asked to handle packet type {:d}".format(ptype)
)
def _parse_kexecdh_init(self, m):
peer_key_bytes = m.get_string()
peer_key = X25519PublicKey.from_public_bytes(peer_key_bytes)
K = self._perform_exchange(peer_key)
K = int(binascii.hexlify(K), 16)
# compute exchange hash
hm = Message()
hm.add(
self.transport.remote_version,
self.transport.local_version,
self.transport.remote_kex_init,
self.transport.local_kex_init,
)
server_key_bytes = self.transport.get_server_key().asbytes()
exchange_key_bytes = self.key.public_key().public_bytes(
serialization.Encoding.Raw, serialization.PublicFormat.Raw
)
hm.add_string(server_key_bytes)
hm.add_string(peer_key_bytes)
hm.add_string(exchange_key_bytes)
hm.add_mpint(K)
H = self.hash_algo(hm.asbytes()).digest()
self.transport._set_K_H(K, H)
sig = self.transport.get_server_key().sign_ssh_data(
H, self.transport.host_key_type
)
# construct reply
m = Message()
m.add_byte(c_MSG_KEXECDH_REPLY)
m.add_string(server_key_bytes)
m.add_string(exchange_key_bytes)
m.add_string(sig)
self.transport._send_message(m)
self.transport._activate_outbound()
def _parse_kexecdh_reply(self, m):
peer_host_key_bytes = m.get_string()
peer_key_bytes = m.get_string()
sig = m.get_binary()
peer_key = X25519PublicKey.from_public_bytes(peer_key_bytes)
K = self._perform_exchange(peer_key)
K = int(binascii.hexlify(K), 16)
# compute exchange hash and verify signature
hm = Message()
hm.add(
self.transport.local_version,
self.transport.remote_version,
self.transport.local_kex_init,
self.transport.remote_kex_init,
)
hm.add_string(peer_host_key_bytes)
hm.add_string(
self.key.public_key().public_bytes(
serialization.Encoding.Raw, serialization.PublicFormat.Raw
)
)
hm.add_string(peer_key_bytes)
hm.add_mpint(K)
self.transport._set_K_H(K, self.hash_algo(hm.asbytes()).digest())
self.transport._verify_key(peer_host_key_bytes, sig)
self.transport._activate_outbound()
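# ---------------------------------------------------------------------------
# Editor's illustration (not part of the committed module): the raw X25519
# exchange the class above wraps -- both sides derive the same secret from
# the other's 32-byte public value, and the kex code then feeds it to the
# exchange hash as an mpint. Uses only names already imported in this module.
if __name__ == "__main__":
    if KexCurve25519.is_available():
        client = X25519PrivateKey.generate()
        server = X25519PrivateKey.generate()
        c_pub = client.public_key().public_bytes(
            serialization.Encoding.Raw, serialization.PublicFormat.Raw
        )
        s_pub = server.public_key().public_bytes(
            serialization.Encoding.Raw, serialization.PublicFormat.Raw
        )
        k1 = client.exchange(X25519PublicKey.from_public_bytes(s_pub))
        k2 = server.exchange(X25519PublicKey.from_public_bytes(c_pub))
        assert k1 == k2
        K = int(binascii.hexlify(k1), 16)  # same conversion as above
        print("X25519 shared secret OK ({} bits)".format(K.bit_length()))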

View File

@ -0,0 +1,151 @@
"""
Ephemeral Elliptic Curve Diffie-Hellman (ECDH) key exchange
RFC 5656, Section 4
"""
from hashlib import sha256, sha384, sha512
from paramiko.common import byte_chr
from paramiko.message import Message
from paramiko.ssh_exception import SSHException
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives import serialization
from binascii import hexlify
_MSG_KEXECDH_INIT, _MSG_KEXECDH_REPLY = range(30, 32)
c_MSG_KEXECDH_INIT, c_MSG_KEXECDH_REPLY = [byte_chr(c) for c in range(30, 32)]
class KexNistp256:
name = "ecdh-sha2-nistp256"
hash_algo = sha256
curve = ec.SECP256R1()
def __init__(self, transport):
self.transport = transport
# private key, client public and server public keys
self.P = 0
self.Q_C = None
self.Q_S = None
def start_kex(self):
self._generate_key_pair()
if self.transport.server_mode:
self.transport._expect_packet(_MSG_KEXECDH_INIT)
return
m = Message()
m.add_byte(c_MSG_KEXECDH_INIT)
# SEC1: V2.0 2.3.3 Elliptic-Curve-Point-to-Octet-String Conversion
m.add_string(
self.Q_C.public_bytes(
serialization.Encoding.X962,
serialization.PublicFormat.UncompressedPoint,
)
)
self.transport._send_message(m)
self.transport._expect_packet(_MSG_KEXECDH_REPLY)
def parse_next(self, ptype, m):
if self.transport.server_mode and (ptype == _MSG_KEXECDH_INIT):
return self._parse_kexecdh_init(m)
elif not self.transport.server_mode and (ptype == _MSG_KEXECDH_REPLY):
return self._parse_kexecdh_reply(m)
raise SSHException(
"KexECDH asked to handle packet type {:d}".format(ptype)
)
def _generate_key_pair(self):
self.P = ec.generate_private_key(self.curve, default_backend())
if self.transport.server_mode:
self.Q_S = self.P.public_key()
return
self.Q_C = self.P.public_key()
def _parse_kexecdh_init(self, m):
Q_C_bytes = m.get_string()
self.Q_C = ec.EllipticCurvePublicKey.from_encoded_point(
self.curve, Q_C_bytes
)
K_S = self.transport.get_server_key().asbytes()
K = self.P.exchange(ec.ECDH(), self.Q_C)
K = int(hexlify(K), 16)
# compute exchange hash
hm = Message()
hm.add(
self.transport.remote_version,
self.transport.local_version,
self.transport.remote_kex_init,
self.transport.local_kex_init,
)
hm.add_string(K_S)
hm.add_string(Q_C_bytes)
# SEC1: V2.0 2.3.3 Elliptic-Curve-Point-to-Octet-String Conversion
hm.add_string(
self.Q_S.public_bytes(
serialization.Encoding.X962,
serialization.PublicFormat.UncompressedPoint,
)
)
hm.add_mpint(int(K))
H = self.hash_algo(hm.asbytes()).digest()
self.transport._set_K_H(K, H)
sig = self.transport.get_server_key().sign_ssh_data(
H, self.transport.host_key_type
)
# construct reply
m = Message()
m.add_byte(c_MSG_KEXECDH_REPLY)
m.add_string(K_S)
m.add_string(
self.Q_S.public_bytes(
serialization.Encoding.X962,
serialization.PublicFormat.UncompressedPoint,
)
)
m.add_string(sig)
self.transport._send_message(m)
self.transport._activate_outbound()
def _parse_kexecdh_reply(self, m):
K_S = m.get_string()
Q_S_bytes = m.get_string()
self.Q_S = ec.EllipticCurvePublicKey.from_encoded_point(
self.curve, Q_S_bytes
)
sig = m.get_binary()
K = self.P.exchange(ec.ECDH(), self.Q_S)
K = int(hexlify(K), 16)
# compute exchange hash and verify signature
hm = Message()
hm.add(
self.transport.local_version,
self.transport.remote_version,
self.transport.local_kex_init,
self.transport.remote_kex_init,
)
hm.add_string(K_S)
# SEC1: V2.0 2.3.3 Elliptic-Curve-Point-to-Octet-String Conversion
hm.add_string(
self.Q_C.public_bytes(
serialization.Encoding.X962,
serialization.PublicFormat.UncompressedPoint,
)
)
hm.add_string(Q_S_bytes)
hm.add_mpint(K)
self.transport._set_K_H(K, self.hash_algo(hm.asbytes()).digest())
self.transport._verify_key(K_S, sig)
self.transport._activate_outbound()
class KexNistp384(KexNistp256):
name = "ecdh-sha2-nistp384"
hash_algo = sha384
curve = ec.SECP384R1()
class KexNistp521(KexNistp256):
name = "ecdh-sha2-nistp521"
hash_algo = sha512
curve = ec.SECP521R1()
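# ---------------------------------------------------------------------------
# Editor's illustration (not part of the committed module): the bare P-256
# ECDH computation behind KexNistp256 -- encode one public key as an X9.62
# uncompressed point (what goes on the wire), decode it on the other side,
# and confirm both peers derive the same K. Only names imported above appear.
if __name__ == "__main__":
    curve = ec.SECP256R1()
    alice = ec.generate_private_key(curve, default_backend())
    bob = ec.generate_private_key(curve, default_backend())
    bob_point = bob.public_key().public_bytes(
        serialization.Encoding.X962,
        serialization.PublicFormat.UncompressedPoint,
    )
    peer = ec.EllipticCurvePublicKey.from_encoded_point(curve, bob_point)
    shared_a = alice.exchange(ec.ECDH(), peer)
    shared_b = bob.exchange(ec.ECDH(), alice.public_key())
    assert shared_a == shared_b
    K = int(hexlify(shared_a), 16)  # fed to the exchange hash as an mpint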

View File

@ -0,0 +1,288 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Variant on `KexGroup1 <paramiko.kex_group1.KexGroup1>` where the prime "p" and
generator "g" are provided by the server. A bit more work is required on the
client side, and a **lot** more on the server side.
"""
import os
from hashlib import sha1, sha256
from paramiko import util
from paramiko.common import DEBUG, byte_chr, byte_ord, byte_mask
from paramiko.message import Message
from paramiko.ssh_exception import SSHException
(
_MSG_KEXDH_GEX_REQUEST_OLD,
_MSG_KEXDH_GEX_GROUP,
_MSG_KEXDH_GEX_INIT,
_MSG_KEXDH_GEX_REPLY,
_MSG_KEXDH_GEX_REQUEST,
) = range(30, 35)
(
c_MSG_KEXDH_GEX_REQUEST_OLD,
c_MSG_KEXDH_GEX_GROUP,
c_MSG_KEXDH_GEX_INIT,
c_MSG_KEXDH_GEX_REPLY,
c_MSG_KEXDH_GEX_REQUEST,
) = [byte_chr(c) for c in range(30, 35)]
class KexGex:
name = "diffie-hellman-group-exchange-sha1"
min_bits = 1024
max_bits = 8192
preferred_bits = 2048
hash_algo = sha1
def __init__(self, transport):
self.transport = transport
self.p = None
self.q = None
self.g = None
self.x = None
self.e = None
self.f = None
self.old_style = False
def start_kex(self, _test_old_style=False):
if self.transport.server_mode:
self.transport._expect_packet(
_MSG_KEXDH_GEX_REQUEST, _MSG_KEXDH_GEX_REQUEST_OLD
)
return
# request a bit range: we accept (min_bits) to (max_bits), but prefer
# (preferred_bits). according to the spec, we shouldn't pull the
# minimum up above 1024.
m = Message()
if _test_old_style:
# only used for unit tests: we shouldn't ever send this
m.add_byte(c_MSG_KEXDH_GEX_REQUEST_OLD)
m.add_int(self.preferred_bits)
self.old_style = True
else:
m.add_byte(c_MSG_KEXDH_GEX_REQUEST)
m.add_int(self.min_bits)
m.add_int(self.preferred_bits)
m.add_int(self.max_bits)
self.transport._send_message(m)
self.transport._expect_packet(_MSG_KEXDH_GEX_GROUP)
def parse_next(self, ptype, m):
if ptype == _MSG_KEXDH_GEX_REQUEST:
return self._parse_kexdh_gex_request(m)
elif ptype == _MSG_KEXDH_GEX_GROUP:
return self._parse_kexdh_gex_group(m)
elif ptype == _MSG_KEXDH_GEX_INIT:
return self._parse_kexdh_gex_init(m)
elif ptype == _MSG_KEXDH_GEX_REPLY:
return self._parse_kexdh_gex_reply(m)
elif ptype == _MSG_KEXDH_GEX_REQUEST_OLD:
return self._parse_kexdh_gex_request_old(m)
msg = "KexGex {} asked to handle packet type {:d}"
raise SSHException(msg.format(self.name, ptype))
# ...internals...
def _generate_x(self):
# generate an "x" (1 < x < (p-1)/2).
q = (self.p - 1) // 2
qnorm = util.deflate_long(q, 0)
qhbyte = byte_ord(qnorm[0])
byte_count = len(qnorm)
qmask = 0xFF
while not (qhbyte & 0x80):
qhbyte <<= 1
qmask >>= 1
while True:
x_bytes = os.urandom(byte_count)
x_bytes = byte_mask(x_bytes[0], qmask) + x_bytes[1:]
x = util.inflate_long(x_bytes, 1)
if (x > 1) and (x < q):
break
self.x = x
def _parse_kexdh_gex_request(self, m):
minbits = m.get_int()
preferredbits = m.get_int()
maxbits = m.get_int()
# smoosh the user's preferred size into our own limits
if preferredbits > self.max_bits:
preferredbits = self.max_bits
if preferredbits < self.min_bits:
preferredbits = self.min_bits
# fix min/max if they're inconsistent. technically, we could just pout
# and hang up, but there's no harm in giving them the benefit of the
# doubt and just picking a bitsize for them.
if minbits > preferredbits:
minbits = preferredbits
if maxbits < preferredbits:
maxbits = preferredbits
# now save a copy
self.min_bits = minbits
self.preferred_bits = preferredbits
self.max_bits = maxbits
# generate prime
pack = self.transport._get_modulus_pack()
if pack is None:
raise SSHException("Can't do server-side gex with no modulus pack")
self.transport._log(
DEBUG,
"Picking p ({} <= {} <= {} bits)".format(
minbits, preferredbits, maxbits
),
)
self.g, self.p = pack.get_modulus(minbits, preferredbits, maxbits)
m = Message()
m.add_byte(c_MSG_KEXDH_GEX_GROUP)
m.add_mpint(self.p)
m.add_mpint(self.g)
self.transport._send_message(m)
self.transport._expect_packet(_MSG_KEXDH_GEX_INIT)
def _parse_kexdh_gex_request_old(self, m):
# same as above, but without min_bits or max_bits (used by older
# clients like putty)
self.preferred_bits = m.get_int()
# smoosh the user's preferred size into our own limits
if self.preferred_bits > self.max_bits:
self.preferred_bits = self.max_bits
if self.preferred_bits < self.min_bits:
self.preferred_bits = self.min_bits
# generate prime
pack = self.transport._get_modulus_pack()
if pack is None:
raise SSHException("Can't do server-side gex with no modulus pack")
self.transport._log(
DEBUG, "Picking p (~ {} bits)".format(self.preferred_bits)
)
self.g, self.p = pack.get_modulus(
self.min_bits, self.preferred_bits, self.max_bits
)
m = Message()
m.add_byte(c_MSG_KEXDH_GEX_GROUP)
m.add_mpint(self.p)
m.add_mpint(self.g)
self.transport._send_message(m)
self.transport._expect_packet(_MSG_KEXDH_GEX_INIT)
self.old_style = True
def _parse_kexdh_gex_group(self, m):
self.p = m.get_mpint()
self.g = m.get_mpint()
# reject if p's bit length < 1024 or > 8192
bitlen = util.bit_length(self.p)
if (bitlen < 1024) or (bitlen > 8192):
raise SSHException(
"Server-generated gex p (don't ask) is out of range "
"({} bits)".format(bitlen)
)
self.transport._log(DEBUG, "Got server p ({} bits)".format(bitlen))
self._generate_x()
# now compute e = g^x mod p
self.e = pow(self.g, self.x, self.p)
m = Message()
m.add_byte(c_MSG_KEXDH_GEX_INIT)
m.add_mpint(self.e)
self.transport._send_message(m)
self.transport._expect_packet(_MSG_KEXDH_GEX_REPLY)
def _parse_kexdh_gex_init(self, m):
self.e = m.get_mpint()
if (self.e < 1) or (self.e > self.p - 1):
raise SSHException('Client kex "e" is out of range')
self._generate_x()
self.f = pow(self.g, self.x, self.p)
K = pow(self.e, self.x, self.p)
key = self.transport.get_server_key().asbytes()
# okay, build up the hash H of
# (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa
hm = Message()
hm.add(
self.transport.remote_version,
self.transport.local_version,
self.transport.remote_kex_init,
self.transport.local_kex_init,
key,
)
if not self.old_style:
hm.add_int(self.min_bits)
hm.add_int(self.preferred_bits)
if not self.old_style:
hm.add_int(self.max_bits)
hm.add_mpint(self.p)
hm.add_mpint(self.g)
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
H = self.hash_algo(hm.asbytes()).digest()
self.transport._set_K_H(K, H)
# sign it
sig = self.transport.get_server_key().sign_ssh_data(
H, self.transport.host_key_type
)
# send reply
m = Message()
m.add_byte(c_MSG_KEXDH_GEX_REPLY)
m.add_string(key)
m.add_mpint(self.f)
m.add_string(sig)
self.transport._send_message(m)
self.transport._activate_outbound()
def _parse_kexdh_gex_reply(self, m):
host_key = m.get_string()
self.f = m.get_mpint()
sig = m.get_string()
if (self.f < 1) or (self.f > self.p - 1):
raise SSHException('Server kex "f" is out of range')
K = pow(self.f, self.x, self.p)
# okay, build up the hash H of
# (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa
hm = Message()
hm.add(
self.transport.local_version,
self.transport.remote_version,
self.transport.local_kex_init,
self.transport.remote_kex_init,
host_key,
)
if not self.old_style:
hm.add_int(self.min_bits)
hm.add_int(self.preferred_bits)
if not self.old_style:
hm.add_int(self.max_bits)
hm.add_mpint(self.p)
hm.add_mpint(self.g)
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
self.transport._set_K_H(K, self.hash_algo(hm.asbytes()).digest())
self.transport._verify_key(host_key, sig)
self.transport._activate_outbound()
class KexGexSHA256(KexGex):
name = "diffie-hellman-group-exchange-sha256"
hash_algo = sha256

View File

@ -0,0 +1,155 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Standard SSH key exchange ("kex" if you wanna sound cool). Diffie-Hellman of
1024 bit key halves, using a known "p" prime and "g" generator.
"""
import os
from hashlib import sha1
from paramiko import util
from paramiko.common import max_byte, zero_byte, byte_chr, byte_mask
from paramiko.message import Message
from paramiko.ssh_exception import SSHException
_MSG_KEXDH_INIT, _MSG_KEXDH_REPLY = range(30, 32)
c_MSG_KEXDH_INIT, c_MSG_KEXDH_REPLY = [byte_chr(c) for c in range(30, 32)]
b7fffffffffffffff = byte_chr(0x7F) + max_byte * 7
b0000000000000000 = zero_byte * 8
class KexGroup1:
# draft-ietf-secsh-transport-09.txt, page 17
P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF # noqa
G = 2
name = "diffie-hellman-group1-sha1"
hash_algo = sha1
def __init__(self, transport):
self.transport = transport
self.x = 0
self.e = 0
self.f = 0
def start_kex(self):
self._generate_x()
if self.transport.server_mode:
# compute f = g^x mod p, but don't send it yet
self.f = pow(self.G, self.x, self.P)
self.transport._expect_packet(_MSG_KEXDH_INIT)
return
# compute e = g^x mod p (where g=2), and send it
self.e = pow(self.G, self.x, self.P)
m = Message()
m.add_byte(c_MSG_KEXDH_INIT)
m.add_mpint(self.e)
self.transport._send_message(m)
self.transport._expect_packet(_MSG_KEXDH_REPLY)
def parse_next(self, ptype, m):
if self.transport.server_mode and (ptype == _MSG_KEXDH_INIT):
return self._parse_kexdh_init(m)
elif not self.transport.server_mode and (ptype == _MSG_KEXDH_REPLY):
return self._parse_kexdh_reply(m)
msg = "KexGroup1 asked to handle packet type {:d}"
raise SSHException(msg.format(ptype))
# ...internals...
def _generate_x(self):
# generate an "x" (1 < x < q), where q is (p-1)/2.
# p is a 128-byte (1024-bit) number, where the first 64 bits are 1.
# therefore q can be approximated as a 2^1023. we drop the subset of
# potential x where the first 63 bits are 1, because some of those
# will be larger than q (but this is a tiny tiny subset of
# potential x).
while 1:
x_bytes = os.urandom(128)
x_bytes = byte_mask(x_bytes[0], 0x7F) + x_bytes[1:]
if (
x_bytes[:8] != b7fffffffffffffff
and x_bytes[:8] != b0000000000000000
):
break
self.x = util.inflate_long(x_bytes)
def _parse_kexdh_reply(self, m):
# client mode
host_key = m.get_string()
self.f = m.get_mpint()
if (self.f < 1) or (self.f > self.P - 1):
raise SSHException('Server kex "f" is out of range')
sig = m.get_binary()
K = pow(self.f, self.x, self.P)
# okay, build up the hash H of
# (V_C || V_S || I_C || I_S || K_S || e || f || K)
hm = Message()
hm.add(
self.transport.local_version,
self.transport.remote_version,
self.transport.local_kex_init,
self.transport.remote_kex_init,
)
hm.add_string(host_key)
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
self.transport._set_K_H(K, self.hash_algo(hm.asbytes()).digest())
self.transport._verify_key(host_key, sig)
self.transport._activate_outbound()
def _parse_kexdh_init(self, m):
# server mode
self.e = m.get_mpint()
if (self.e < 1) or (self.e > self.P - 1):
raise SSHException('Client kex "e" is out of range')
K = pow(self.e, self.x, self.P)
key = self.transport.get_server_key().asbytes()
# okay, build up the hash H of
# (V_C || V_S || I_C || I_S || K_S || e || f || K)
hm = Message()
hm.add(
self.transport.remote_version,
self.transport.local_version,
self.transport.remote_kex_init,
self.transport.local_kex_init,
)
hm.add_string(key)
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
H = self.hash_algo(hm.asbytes()).digest()
self.transport._set_K_H(K, H)
# sign it
sig = self.transport.get_server_key().sign_ssh_data(
H, self.transport.host_key_type
)
# send reply
m = Message()
m.add_byte(c_MSG_KEXDH_REPLY)
m.add_string(key)
m.add_mpint(self.f)
m.add_string(sig)
self.transport._send_message(m)
self.transport._activate_outbound()
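# ---------------------------------------------------------------------------
# Editor's illustration (not part of the committed module): the classic DH
# identity the class relies on -- with shared (P, G), both sides compute the
# same K = g^(xy) mod p. random.randrange stands in for the os.urandom-based
# sampling in _generate_x(); fine for a demo, not for real key material.
if __name__ == "__main__":
    import random

    q = (KexGroup1.P - 1) // 2
    x = random.randrange(2, q)                 # client's secret exponent
    y = random.randrange(2, q)                 # server's secret exponent
    e = pow(KexGroup1.G, x, KexGroup1.P)       # sent client -> server
    f = pow(KexGroup1.G, y, KexGroup1.P)       # sent server -> client
    assert pow(f, x, KexGroup1.P) == pow(e, y, KexGroup1.P)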

View File

@ -0,0 +1,40 @@
# Copyright (C) 2013 Torsten Landschoff <torsten@debian.org>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Standard SSH key exchange ("kex" if you wanna sound cool). Diffie-Hellman of
2048 bit key halves, using a known "p" prime and "g" generator.
"""
from paramiko.kex_group1 import KexGroup1
from hashlib import sha1, sha256
class KexGroup14(KexGroup1):
# http://tools.ietf.org/html/rfc3526#section-3
P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF # noqa
G = 2
name = "diffie-hellman-group14-sha1"
hash_algo = sha1
class KexGroup14SHA256(KexGroup14):
name = "diffie-hellman-group14-sha256"
hash_algo = sha256

View File

@ -0,0 +1,35 @@
# Copyright (C) 2019 Edgar Sousa <https://github.com/edgsousa>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Standard SSH key exchange ("kex" if you wanna sound cool). Diffie-Hellman of
4096 bit key halves, using a known "p" prime and "g" generator.
"""
from paramiko.kex_group1 import KexGroup1
from hashlib import sha512
class KexGroup16SHA512(KexGroup1):
name = "diffie-hellman-group16-sha512"
# http://tools.ietf.org/html/rfc3526#section-5
P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF # noqa
G = 2
name = "diffie-hellman-group16-sha512"
hash_algo = sha512
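# ---------------------------------------------------------------------------
# Minimal sanity-check sketch, not part of the paramiko API: the RFC 3526
# group 16 modulus above is a 4096-bit prime, versus 2048 bits for group 14.
if __name__ == "__main__":
    from paramiko.kex_group14 import KexGroup14

    assert KexGroup16SHA512.P.bit_length() == 4096
    assert KexGroup14.P.bit_length() == 2048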

View File

@ -0,0 +1,686 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
# Copyright (C) 2013-2014 science + computing ag
# Author: Sebastian Deiss <sebastian.deiss@t-online.de>
#
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
This module provides GSS-API / SSPI Key Exchange as defined in :rfc:`4462`.
.. note:: Credential delegation is not supported in server mode.
.. note::
`RFC 4462 Section 2.2
<https://tools.ietf.org/html/rfc4462.html#section-2.2>`_ says we are not
required to implement GSS-API error messages. Thus, in many methods within
this module, if an error occurs an exception will be thrown and the
connection will be terminated.
.. seealso:: :doc:`/api/ssh_gss`
.. versionadded:: 1.15
"""
import os
from hashlib import sha1
from paramiko.common import (
DEBUG,
max_byte,
zero_byte,
byte_chr,
byte_mask,
byte_ord,
)
from paramiko import util
from paramiko.message import Message
from paramiko.ssh_exception import SSHException
(
MSG_KEXGSS_INIT,
MSG_KEXGSS_CONTINUE,
MSG_KEXGSS_COMPLETE,
MSG_KEXGSS_HOSTKEY,
MSG_KEXGSS_ERROR,
) = range(30, 35)
(MSG_KEXGSS_GROUPREQ, MSG_KEXGSS_GROUP) = range(40, 42)
(
c_MSG_KEXGSS_INIT,
c_MSG_KEXGSS_CONTINUE,
c_MSG_KEXGSS_COMPLETE,
c_MSG_KEXGSS_HOSTKEY,
c_MSG_KEXGSS_ERROR,
) = [byte_chr(c) for c in range(30, 35)]
(c_MSG_KEXGSS_GROUPREQ, c_MSG_KEXGSS_GROUP) = [
byte_chr(c) for c in range(40, 42)
]
class KexGSSGroup1:
"""
GSS-API / SSPI Authenticated Diffie-Hellman Key Exchange as defined in `RFC
4462 Section 2 <https://tools.ietf.org/html/rfc4462.html#section-2>`_
"""
# draft-ietf-secsh-transport-09.txt, page 17
P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF # noqa
G = 2
b7fffffffffffffff = byte_chr(0x7F) + max_byte * 7 # noqa
b0000000000000000 = zero_byte * 8 # noqa
NAME = "gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g=="
def __init__(self, transport):
self.transport = transport
self.kexgss = self.transport.kexgss_ctxt
self.gss_host = None
self.x = 0
self.e = 0
self.f = 0
def start_kex(self):
"""
Start the GSS-API / SSPI Authenticated Diffie-Hellman Key Exchange.
"""
self._generate_x()
if self.transport.server_mode:
# compute f = g^x mod p, but don't send it yet
self.f = pow(self.G, self.x, self.P)
self.transport._expect_packet(MSG_KEXGSS_INIT)
return
# compute e = g^x mod p (where g=2), and send it
self.e = pow(self.G, self.x, self.P)
# Initialize GSS-API Key Exchange
self.gss_host = self.transport.gss_host
m = Message()
m.add_byte(c_MSG_KEXGSS_INIT)
m.add_string(self.kexgss.ssh_init_sec_context(target=self.gss_host))
m.add_mpint(self.e)
self.transport._send_message(m)
self.transport._expect_packet(
MSG_KEXGSS_HOSTKEY,
MSG_KEXGSS_CONTINUE,
MSG_KEXGSS_COMPLETE,
MSG_KEXGSS_ERROR,
)
def parse_next(self, ptype, m):
"""
Parse the next packet.
:param int ptype: The type number of the incoming packet
:param `.Message` m: The packet content
"""
if self.transport.server_mode and (ptype == MSG_KEXGSS_INIT):
return self._parse_kexgss_init(m)
elif not self.transport.server_mode and (ptype == MSG_KEXGSS_HOSTKEY):
return self._parse_kexgss_hostkey(m)
elif self.transport.server_mode and (ptype == MSG_KEXGSS_CONTINUE):
return self._parse_kexgss_continue(m)
elif not self.transport.server_mode and (ptype == MSG_KEXGSS_COMPLETE):
return self._parse_kexgss_complete(m)
elif ptype == MSG_KEXGSS_ERROR:
return self._parse_kexgss_error(m)
msg = "GSS KexGroup1 asked to handle packet type {:d}"
raise SSHException(msg.format(ptype))
# ## internals...
def _generate_x(self):
"""
generate an "x" (1 < x < q), where q is (p-1)/2.
p is a 128-byte (1024-bit) number, where the first 64 bits are 1.
therefore q can be approximated as a 2^1023. we drop the subset of
potential x where the first 63 bits are 1, because some of those will
be larger than q (but this is a tiny tiny subset of potential x).
"""
while 1:
x_bytes = os.urandom(128)
x_bytes = byte_mask(x_bytes[0], 0x7F) + x_bytes[1:]
first = x_bytes[:8]
if first not in (self.b7fffffffffffffff, self.b0000000000000000):
break
self.x = util.inflate_long(x_bytes)
def _parse_kexgss_hostkey(self, m):
"""
Parse the SSH2_MSG_KEXGSS_HOSTKEY message (client mode).
:param `.Message` m: The content of the SSH2_MSG_KEXGSS_HOSTKEY message
"""
# client mode
host_key = m.get_string()
self.transport.host_key = host_key
sig = m.get_string()
self.transport._verify_key(host_key, sig)
self.transport._expect_packet(MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE)
def _parse_kexgss_continue(self, m):
"""
Parse the SSH2_MSG_KEXGSS_CONTINUE message.
:param `.Message` m: The content of the SSH2_MSG_KEXGSS_CONTINUE
message
"""
if not self.transport.server_mode:
srv_token = m.get_string()
m = Message()
m.add_byte(c_MSG_KEXGSS_CONTINUE)
m.add_string(
self.kexgss.ssh_init_sec_context(
target=self.gss_host, recv_token=srv_token
)
)
self.transport._send_message(m)
self.transport._expect_packet(
MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE, MSG_KEXGSS_ERROR
)
else:
pass
def _parse_kexgss_complete(self, m):
"""
Parse the SSH2_MSG_KEXGSS_COMPLETE message (client mode).
:param `.Message` m: The content of the
SSH2_MSG_KEXGSS_COMPLETE message
"""
# client mode
if self.transport.host_key is None:
self.transport.host_key = NullHostKey()
self.f = m.get_mpint()
if (self.f < 1) or (self.f > self.P - 1):
raise SSHException('Server kex "f" is out of range')
mic_token = m.get_string()
# Per RFC 4462, this flag is TRUE when a GSS-API token follows.
token_present = m.get_boolean()
srv_token = None
if token_present:
srv_token = m.get_string()
K = pow(self.f, self.x, self.P)
# okay, build up the hash H of
# (V_C || V_S || I_C || I_S || K_S || e || f || K)
hm = Message()
hm.add(
self.transport.local_version,
self.transport.remote_version,
self.transport.local_kex_init,
self.transport.remote_kex_init,
)
hm.add_string(self.transport.host_key.__str__())
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
H = sha1(hm.asbytes()).digest()
self.transport._set_K_H(K, H)
if srv_token is not None:
self.kexgss.ssh_init_sec_context(
target=self.gss_host, recv_token=srv_token
)
self.kexgss.ssh_check_mic(mic_token, H)
else:
self.kexgss.ssh_check_mic(mic_token, H)
self.transport.gss_kex_used = True
self.transport._activate_outbound()
def _parse_kexgss_init(self, m):
"""
Parse the SSH2_MSG_KEXGSS_INIT message (server mode).
:param `.Message` m: The content of the SSH2_MSG_KEXGSS_INIT message
"""
# server mode
client_token = m.get_string()
self.e = m.get_mpint()
if (self.e < 1) or (self.e > self.P - 1):
raise SSHException('Client kex "e" is out of range')
K = pow(self.e, self.x, self.P)
self.transport.host_key = NullHostKey()
key = self.transport.host_key.__str__()
# okay, build up the hash H of
# (V_C || V_S || I_C || I_S || K_S || e || f || K)
hm = Message()
hm.add(
self.transport.remote_version,
self.transport.local_version,
self.transport.remote_kex_init,
self.transport.local_kex_init,
)
hm.add_string(key)
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
H = sha1(hm.asbytes()).digest()
self.transport._set_K_H(K, H)
srv_token = self.kexgss.ssh_accept_sec_context(
self.gss_host, client_token
)
m = Message()
if self.kexgss._gss_srv_ctxt_status:
mic_token = self.kexgss.ssh_get_mic(
self.transport.session_id, gss_kex=True
)
m.add_byte(c_MSG_KEXGSS_COMPLETE)
m.add_mpint(self.f)
m.add_string(mic_token)
if srv_token is not None:
m.add_boolean(True)
m.add_string(srv_token)
else:
m.add_boolean(False)
self.transport._send_message(m)
self.transport.gss_kex_used = True
self.transport._activate_outbound()
else:
m.add_byte(c_MSG_KEXGSS_CONTINUE)
m.add_string(srv_token)
self.transport._send_message(m)
self.transport._expect_packet(
MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE, MSG_KEXGSS_ERROR
)
def _parse_kexgss_error(self, m):
"""
Parse the SSH2_MSG_KEXGSS_ERROR message (client mode).
The server may send a GSS-API error message. If it does, we display
the error by raising an exception (client mode).
:param `.Message` m: The content of the SSH2_MSG_KEXGSS_ERROR message
:raise SSHException: Contains GSS-API major and minor status as well as
the error message and the language tag of the
message
"""
maj_status = m.get_int()
min_status = m.get_int()
err_msg = m.get_string()
m.get_string() # we don't care about the language!
raise SSHException(
"""GSS-API Error:
Major Status: {}
Minor Status: {}
Error Message: {}
""".format(
maj_status, min_status, err_msg
)
)
class KexGSSGroup14(KexGSSGroup1):
"""
GSS-API / SSPI Authenticated Diffie-Hellman Group14 Key Exchange as defined
in `RFC 4462 Section 2
<https://tools.ietf.org/html/rfc4462.html#section-2>`_
"""
P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF # noqa
G = 2
NAME = "gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g=="
class KexGSSGex:
"""
GSS-API / SSPI Authenticated Diffie-Hellman Group Exchange as defined in
`RFC 4462 Section 2 <https://tools.ietf.org/html/rfc4462.html#section-2>`_
"""
NAME = "gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g=="
min_bits = 1024
max_bits = 8192
preferred_bits = 2048
def __init__(self, transport):
self.transport = transport
self.kexgss = self.transport.kexgss_ctxt
self.gss_host = None
self.p = None
self.q = None
self.g = None
self.x = None
self.e = None
self.f = None
self.old_style = False
def start_kex(self):
"""
Start the GSS-API / SSPI Authenticated Diffie-Hellman Group Exchange
"""
if self.transport.server_mode:
self.transport._expect_packet(MSG_KEXGSS_GROUPREQ)
return
# request a bit range: we accept (min_bits) to (max_bits), but prefer
# (preferred_bits). according to the spec, we shouldn't pull the
# minimum up above 1024.
self.gss_host = self.transport.gss_host
m = Message()
m.add_byte(c_MSG_KEXGSS_GROUPREQ)
m.add_int(self.min_bits)
m.add_int(self.preferred_bits)
m.add_int(self.max_bits)
self.transport._send_message(m)
self.transport._expect_packet(MSG_KEXGSS_GROUP)
def parse_next(self, ptype, m):
"""
Parse the next packet.
:param int ptype: The type number of the incoming packet
:param `.Message` m: The packet content
"""
if ptype == MSG_KEXGSS_GROUPREQ:
return self._parse_kexgss_groupreq(m)
elif ptype == MSG_KEXGSS_GROUP:
return self._parse_kexgss_group(m)
elif ptype == MSG_KEXGSS_INIT:
return self._parse_kexgss_gex_init(m)
elif ptype == MSG_KEXGSS_HOSTKEY:
return self._parse_kexgss_hostkey(m)
elif ptype == MSG_KEXGSS_CONTINUE:
return self._parse_kexgss_continue(m)
elif ptype == MSG_KEXGSS_COMPLETE:
return self._parse_kexgss_complete(m)
elif ptype == MSG_KEXGSS_ERROR:
return self._parse_kexgss_error(m)
msg = "KexGex asked to handle packet type {:d}"
raise SSHException(msg.format(ptype))
# ## internals...
def _generate_x(self):
# generate an "x" (1 < x < (p-1)/2).
q = (self.p - 1) // 2
qnorm = util.deflate_long(q, 0)
qhbyte = byte_ord(qnorm[0])
byte_count = len(qnorm)
qmask = 0xFF
while not (qhbyte & 0x80):
qhbyte <<= 1
qmask >>= 1
while True:
x_bytes = os.urandom(byte_count)
x_bytes = byte_mask(x_bytes[0], qmask) + x_bytes[1:]
x = util.inflate_long(x_bytes, 1)
if (x > 1) and (x < q):
break
self.x = x
def _parse_kexgss_groupreq(self, m):
"""
Parse the SSH2_MSG_KEXGSS_GROUPREQ message (server mode).
:param `.Message` m: The content of the
SSH2_MSG_KEXGSS_GROUPREQ message
"""
minbits = m.get_int()
preferredbits = m.get_int()
maxbits = m.get_int()
# smoosh the user's preferred size into our own limits
if preferredbits > self.max_bits:
preferredbits = self.max_bits
if preferredbits < self.min_bits:
preferredbits = self.min_bits
# fix min/max if they're inconsistent. technically, we could just pout
# and hang up, but there's no harm in giving them the benefit of the
# doubt and just picking a bitsize for them.
if minbits > preferredbits:
minbits = preferredbits
if maxbits < preferredbits:
maxbits = preferredbits
# now save a copy
self.min_bits = minbits
self.preferred_bits = preferredbits
self.max_bits = maxbits
# generate prime
pack = self.transport._get_modulus_pack()
if pack is None:
raise SSHException("Can't do server-side gex with no modulus pack")
self.transport._log(
DEBUG, # noqa
"Picking p ({} <= {} <= {} bits)".format(
minbits, preferredbits, maxbits
),
)
self.g, self.p = pack.get_modulus(minbits, preferredbits, maxbits)
m = Message()
m.add_byte(c_MSG_KEXGSS_GROUP)
m.add_mpint(self.p)
m.add_mpint(self.g)
self.transport._send_message(m)
self.transport._expect_packet(MSG_KEXGSS_INIT)
def _parse_kexgss_group(self, m):
"""
Parse the SSH2_MSG_KEXGSS_GROUP message (client mode).
:param `Message` m: The content of the SSH2_MSG_KEXGSS_GROUP message
"""
self.p = m.get_mpint()
self.g = m.get_mpint()
# reject if p's bit length < 1024 or > 8192
bitlen = util.bit_length(self.p)
if (bitlen < 1024) or (bitlen > 8192):
raise SSHException(
"Server-generated gex p (don't ask) is out of range "
"({} bits)".format(bitlen)
)
self.transport._log(
DEBUG, "Got server p ({} bits)".format(bitlen)
) # noqa
self._generate_x()
# now compute e = g^x mod p
self.e = pow(self.g, self.x, self.p)
m = Message()
m.add_byte(c_MSG_KEXGSS_INIT)
m.add_string(self.kexgss.ssh_init_sec_context(target=self.gss_host))
m.add_mpint(self.e)
self.transport._send_message(m)
self.transport._expect_packet(
MSG_KEXGSS_HOSTKEY,
MSG_KEXGSS_CONTINUE,
MSG_KEXGSS_COMPLETE,
MSG_KEXGSS_ERROR,
)
def _parse_kexgss_gex_init(self, m):
"""
Parse the SSH2_MSG_KEXGSS_INIT message (server mode).
:param `Message` m: The content of the SSH2_MSG_KEXGSS_INIT message
"""
client_token = m.get_string()
self.e = m.get_mpint()
if (self.e < 1) or (self.e > self.p - 1):
raise SSHException('Client kex "e" is out of range')
self._generate_x()
self.f = pow(self.g, self.x, self.p)
K = pow(self.e, self.x, self.p)
self.transport.host_key = NullHostKey()
key = self.transport.host_key.__str__()
# okay, build up the hash H of
# (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa
hm = Message()
hm.add(
self.transport.remote_version,
self.transport.local_version,
self.transport.remote_kex_init,
self.transport.local_kex_init,
key,
)
hm.add_int(self.min_bits)
hm.add_int(self.preferred_bits)
hm.add_int(self.max_bits)
hm.add_mpint(self.p)
hm.add_mpint(self.g)
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
H = sha1(hm.asbytes()).digest()
self.transport._set_K_H(K, H)
srv_token = self.kexgss.ssh_accept_sec_context(
self.gss_host, client_token
)
m = Message()
if self.kexgss._gss_srv_ctxt_status:
mic_token = self.kexgss.ssh_get_mic(
self.transport.session_id, gss_kex=True
)
m.add_byte(c_MSG_KEXGSS_COMPLETE)
m.add_mpint(self.f)
m.add_string(mic_token)
if srv_token is not None:
m.add_boolean(True)
m.add_string(srv_token)
else:
m.add_boolean(False)
self.transport._send_message(m)
self.transport.gss_kex_used = True
self.transport._activate_outbound()
else:
m.add_byte(c_MSG_KEXGSS_CONTINUE)
m.add_string(srv_token)
self.transport._send_message(m)
self.transport._expect_packet(
MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE, MSG_KEXGSS_ERROR
)
def _parse_kexgss_hostkey(self, m):
"""
Parse the SSH2_MSG_KEXGSS_HOSTKEY message (client mode).
:param `Message` m: The content of the SSH2_MSG_KEXGSS_HOSTKEY message
"""
# client mode
host_key = m.get_string()
self.transport.host_key = host_key
sig = m.get_string()
self.transport._verify_key(host_key, sig)
self.transport._expect_packet(MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE)
def _parse_kexgss_continue(self, m):
"""
Parse the SSH2_MSG_KEXGSS_CONTINUE message.
:param `Message` m: The content of the SSH2_MSG_KEXGSS_CONTINUE message
"""
if not self.transport.server_mode:
srv_token = m.get_string()
m = Message()
m.add_byte(c_MSG_KEXGSS_CONTINUE)
m.add_string(
self.kexgss.ssh_init_sec_context(
target=self.gss_host, recv_token=srv_token
)
)
self.transport._send_message(m)
self.transport._expect_packet(
MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE, MSG_KEXGSS_ERROR
)
else:
pass
def _parse_kexgss_complete(self, m):
"""
Parse the SSH2_MSG_KEXGSS_COMPLETE message (client mode).
:param `Message` m: The content of the SSH2_MSG_KEXGSS_COMPLETE message
"""
if self.transport.host_key is None:
self.transport.host_key = NullHostKey()
self.f = m.get_mpint()
mic_token = m.get_string()
# Per RFC 4462, this flag is TRUE when a GSS-API token follows.
token_present = m.get_boolean()
srv_token = None
if token_present:
srv_token = m.get_string()
if (self.f < 1) or (self.f > self.p - 1):
raise SSHException('Server kex "f" is out of range')
K = pow(self.f, self.x, self.p)
# okay, build up the hash H of
# (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa
hm = Message()
hm.add(
self.transport.local_version,
self.transport.remote_version,
self.transport.local_kex_init,
self.transport.remote_kex_init,
self.transport.host_key.__str__(),
)
if not self.old_style:
hm.add_int(self.min_bits)
hm.add_int(self.preferred_bits)
if not self.old_style:
hm.add_int(self.max_bits)
hm.add_mpint(self.p)
hm.add_mpint(self.g)
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
H = sha1(hm.asbytes()).digest()
self.transport._set_K_H(K, H)
if srv_token is not None:
self.kexgss.ssh_init_sec_context(
target=self.gss_host, recv_token=srv_token
)
self.kexgss.ssh_check_mic(mic_token, H)
else:
self.kexgss.ssh_check_mic(mic_token, H)
self.transport.gss_kex_used = True
self.transport._activate_outbound()
def _parse_kexgss_error(self, m):
"""
Parse the SSH2_MSG_KEXGSS_ERROR message (client mode).
The server may send a GSS-API error message. If it does, we display
the error by raising an exception (client mode).
:param `Message` m: The content of the SSH2_MSG_KEXGSS_ERROR message
:raise SSHException: Contains GSS-API major and minor status as well as
the error message and the language tag of the
message
"""
maj_status = m.get_int()
min_status = m.get_int()
err_msg = m.get_string()
m.get_string() # we don't care about the language (lang_tag)!
raise SSHException(
"""GSS-API Error:
Major Status: {}
Minor Status: {}
Error Message: {}
""".format(
maj_status, min_status, err_msg
)
)
class NullHostKey:
"""
This class represents the Null Host Key for GSS-API Key Exchange as defined
in `RFC 4462 Section 5
<https://tools.ietf.org/html/rfc4462.html#section-5>`_
"""
def __init__(self):
self.key = ""
def __str__(self):
return self.key
def get_name(self):
return self.key
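# ---------------------------------------------------------------------------
# Illustrative sketch of the bit-range clamping performed by
# _parse_kexgss_groupreq above: the client's (min, preferred, max) request is
# squeezed into the server's supported window before a modulus is chosen.
# clamp_bits is a standalone demo helper, not part of the paramiko API.
if __name__ == "__main__":

    def clamp_bits(minbits, preferredbits, maxbits, srv_min=1024, srv_max=8192):
        preferredbits = min(preferredbits, srv_max)
        preferredbits = max(preferredbits, srv_min)
        # repair inconsistent client bounds instead of rejecting them
        minbits = min(minbits, preferredbits)
        maxbits = max(maxbits, preferredbits)
        return minbits, preferredbits, maxbits

    assert clamp_bits(1024, 16384, 16384) == (1024, 8192, 16384)
    assert clamp_bits(4096, 512, 512) == (1024, 1024, 1024)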

View File

@ -0,0 +1,318 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Implementation of an SSH2 "message".
"""
import struct
from io import BytesIO
from paramiko import util
from paramiko.common import zero_byte, max_byte, one_byte
from paramiko.util import u
class Message:
"""
An SSH2 message is a stream of bytes that encodes some combination of
strings, integers, bools, and infinite-precision integers. This class
builds or breaks down such a byte stream.
Normally you don't need to deal with anything this low-level, but it's
exposed for people implementing custom extensions, or features that
paramiko doesn't support yet.
"""
big_int = 0xFF000000
def __init__(self, content=None):
"""
Create a new SSH2 message.
:param bytes content:
the byte stream to use as the message content (passed in only when
decomposing a message).
"""
if content is not None:
self.packet = BytesIO(content)
else:
self.packet = BytesIO()
def __bytes__(self):
return self.asbytes()
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
"""
return "paramiko.Message(" + repr(self.packet.getvalue()) + ")"
# TODO 4.0: just merge into __bytes__ (everywhere)
def asbytes(self):
"""
Return the byte stream content of this Message, as a `bytes`.
"""
return self.packet.getvalue()
def rewind(self):
"""
Rewind the message to the beginning as if no items had been parsed
out of it yet.
"""
self.packet.seek(0)
def get_remainder(self):
"""
Return the `bytes` of this message that haven't already been parsed and
returned.
"""
position = self.packet.tell()
remainder = self.packet.read()
self.packet.seek(position)
return remainder
def get_so_far(self):
"""
Returns the `bytes` of this message that have been parsed and
returned. The string passed into a message's constructor can be
regenerated by concatenating ``get_so_far`` and `get_remainder`.
"""
position = self.packet.tell()
self.rewind()
return self.packet.read(position)
def get_bytes(self, n):
"""
Return the next ``n`` bytes of the message, without decomposing into an
int, decoded string, etc. Just the raw bytes are returned. If fewer than
``n`` bytes remain in the message, the result is zero-padded up to ``n``
bytes (for ``n`` under 1 MB).
"""
b = self.packet.read(n)
max_pad_size = 1 << 20 # Limit padding to 1 MB
if len(b) < n < max_pad_size:
return b + zero_byte * (n - len(b))
return b
def get_byte(self):
"""
Return the next byte of the message, without decomposing it. This
is equivalent to `get_bytes(1) <get_bytes>`.
:return:
the next (`bytes`) byte of the message, or ``b'\000'`` if there
aren't any bytes remaining.
"""
return self.get_bytes(1)
def get_boolean(self):
"""
Fetch a boolean from the stream.
"""
b = self.get_bytes(1)
return b != zero_byte
def get_adaptive_int(self):
"""
Fetch an int from the stream.
:return: a 32-bit unsigned `int`.
"""
byte = self.get_bytes(1)
if byte == max_byte:
return util.inflate_long(self.get_binary())
byte += self.get_bytes(3)
return struct.unpack(">I", byte)[0]
def get_int(self):
"""
Fetch an int from the stream.
"""
return struct.unpack(">I", self.get_bytes(4))[0]
def get_int64(self):
"""
Fetch a 64-bit int from the stream.
:return: a 64-bit unsigned integer (`int`).
"""
return struct.unpack(">Q", self.get_bytes(8))[0]
def get_mpint(self):
"""
Fetch a long int (mpint) from the stream.
:return: an arbitrary-length integer (`int`).
"""
return util.inflate_long(self.get_binary())
# TODO 4.0: depending on where this is used internally or downstream, force
# users to specify get_binary instead and delete this.
def get_string(self):
"""
Fetch a "string" from the stream. This will actually be a `bytes`
object, and may contain unprintable characters. (It's not unheard of
for a string to contain another byte-stream message.)
"""
return self.get_bytes(self.get_int())
# TODO 4.0: also consider having this take over the get_string name, and
# remove this name instead.
def get_text(self):
"""
Fetch a Unicode string from the stream.
This currently operates by attempting to encode the next "string" as
``utf-8``.
"""
return u(self.get_string())
def get_binary(self):
"""
Alias for `get_string` (obtains a bytestring).
"""
return self.get_bytes(self.get_int())
def get_list(self):
"""
Fetch a list of `strings <str>` from the stream.
These are trivially encoded as comma-separated values in a string.
"""
return self.get_text().split(",")
def add_bytes(self, b):
"""
Write bytes to the stream, without any formatting.
:param bytes b: bytes to add
"""
self.packet.write(b)
return self
def add_byte(self, b):
"""
Write a single byte to the stream, without any formatting.
:param bytes b: byte to add
"""
self.packet.write(b)
return self
def add_boolean(self, b):
"""
Add a boolean value to the stream.
:param bool b: boolean value to add
"""
if b:
self.packet.write(one_byte)
else:
self.packet.write(zero_byte)
return self
def add_int(self, n):
"""
Add an integer to the stream.
:param int n: integer to add
"""
self.packet.write(struct.pack(">I", n))
return self
def add_adaptive_int(self, n):
"""
Add an integer to the stream.
:param int n: integer to add
"""
if n >= Message.big_int:
self.packet.write(max_byte)
self.add_string(util.deflate_long(n))
else:
self.packet.write(struct.pack(">I", n))
return self
def add_int64(self, n):
"""
Add a 64-bit int to the stream.
:param int n: long int to add
"""
self.packet.write(struct.pack(">Q", n))
return self
def add_mpint(self, z):
"""
Add a long int to the stream, encoded as an infinite-precision
integer. This method only works on positive numbers.
:param int z: long int to add
"""
self.add_string(util.deflate_long(z))
return self
# TODO: see the TODO for get_string/get_text/et al, this should change
# to match.
def add_string(self, s):
"""
Add a bytestring to the stream.
:param bytes s: bytestring to add
"""
s = util.asbytes(s)
self.add_int(len(s))
self.packet.write(s)
return self
def add_list(self, l): # noqa: E741
"""
Add a list of strings to the stream. They are encoded identically to
a single string of values separated by commas. (Yes, really, that's
how SSH2 does it.)
:param l: list of strings to add
"""
self.add_string(",".join(l))
return self
def _add(self, i):
if type(i) is bool:
return self.add_boolean(i)
elif isinstance(i, int):
return self.add_adaptive_int(i)
elif type(i) is list:
return self.add_list(i)
else:
return self.add_string(i)
# TODO: this would never have worked for unicode strings under Python 3,
# guessing nobody/nothing ever used it for that purpose?
def add(self, *seq):
"""
Add a sequence of items to the stream. The values are encoded based
on their type: bytes, str, int, bool, or list.
.. warning::
Longs are encoded non-deterministically. Don't use this method.
:param seq: the sequence of items
"""
for item in seq:
self._add(item)
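# ---------------------------------------------------------------------------
# Hedged round-trip sketch using only the Message API defined above: values
# written with the add_* methods are read back, in the same order, with the
# matching get_* methods.
if __name__ == "__main__":
    m = Message()
    m.add_byte(zero_byte)
    m.add_int(42)
    m.add_string("ssh-userauth")
    m.add_mpint(2**521 - 1)
    m.add_list(["aes128-ctr", "aes256-ctr"])

    parsed = Message(m.asbytes())
    assert parsed.get_byte() == zero_byte
    assert parsed.get_int() == 42
    assert parsed.get_text() == "ssh-userauth"
    assert parsed.get_mpint() == 2**521 - 1
    assert parsed.get_list() == ["aes128-ctr", "aes256-ctr"]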

View File

@ -0,0 +1,696 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Packet handling
"""
import errno
import os
import socket
import struct
import threading
import time
from hmac import HMAC
from paramiko import util
from paramiko.common import (
linefeed_byte,
cr_byte_value,
MSG_NAMES,
DEBUG,
xffffffff,
zero_byte,
byte_ord,
)
from paramiko.util import u
from paramiko.ssh_exception import SSHException, ProxyCommandFailure
from paramiko.message import Message
def compute_hmac(key, message, digest_class):
return HMAC(key, message, digest_class).digest()
class NeedRekeyException(Exception):
"""
Exception indicating a rekey is needed.
"""
pass
def first_arg(e):
arg = None
if type(e.args) is tuple and len(e.args) > 0:
arg = e.args[0]
return arg
class Packetizer:
"""
Implementation of the base SSH packet protocol.
"""
# READ the secsh RFC's before raising these values. if anything,
# they should probably be lower.
REKEY_PACKETS = pow(2, 29)
REKEY_BYTES = pow(2, 29)
# Allow receiving this many packets after a re-key request before
# terminating
REKEY_PACKETS_OVERFLOW_MAX = pow(2, 29)
# Allow receiving this many bytes after a re-key request before terminating
REKEY_BYTES_OVERFLOW_MAX = pow(2, 29)
def __init__(self, socket):
self.__socket = socket
self.__logger = None
self.__closed = False
self.__dump_packets = False
self.__need_rekey = False
self.__init_count = 0
self.__remainder = bytes()
self._initial_kex_done = False
# used for noticing when to re-key:
self.__sent_bytes = 0
self.__sent_packets = 0
self.__received_bytes = 0
self.__received_packets = 0
self.__received_bytes_overflow = 0
self.__received_packets_overflow = 0
# current inbound/outbound ciphering:
self.__block_size_out = 8
self.__block_size_in = 8
self.__mac_size_out = 0
self.__mac_size_in = 0
self.__block_engine_out = None
self.__block_engine_in = None
self.__sdctr_out = False
self.__mac_engine_out = None
self.__mac_engine_in = None
self.__mac_key_out = bytes()
self.__mac_key_in = bytes()
self.__compress_engine_out = None
self.__compress_engine_in = None
self.__sequence_number_out = 0
self.__sequence_number_in = 0
self.__etm_out = False
self.__etm_in = False
# AEAD (eg aes128-gcm/aes256-gcm) cipher use
self.__aead_out = False
self.__aead_in = False
self.__iv_out = None
self.__iv_in = None
# lock around outbound writes (packet computation)
self.__write_lock = threading.RLock()
# keepalives:
self.__keepalive_interval = 0
self.__keepalive_last = time.time()
self.__keepalive_callback = None
self.__timer = None
self.__handshake_complete = False
self.__timer_expired = False
@property
def closed(self):
return self.__closed
def reset_seqno_out(self):
self.__sequence_number_out = 0
def reset_seqno_in(self):
self.__sequence_number_in = 0
def set_log(self, log):
"""
Set the Python log object to use for logging.
"""
self.__logger = log
def set_outbound_cipher(
self,
block_engine,
block_size,
mac_engine,
mac_size,
mac_key,
sdctr=False,
etm=False,
aead=False,
iv_out=None,
):
"""
Switch outbound data cipher.
:param etm: Set encrypt-then-mac from OpenSSH
"""
self.__block_engine_out = block_engine
self.__sdctr_out = sdctr
self.__block_size_out = block_size
self.__mac_engine_out = mac_engine
self.__mac_size_out = mac_size
self.__mac_key_out = mac_key
self.__sent_bytes = 0
self.__sent_packets = 0
self.__etm_out = etm
self.__aead_out = aead
self.__iv_out = iv_out
# wait until the reset happens in both directions before clearing
# rekey flag
self.__init_count |= 1
if self.__init_count == 3:
self.__init_count = 0
self.__need_rekey = False
def set_inbound_cipher(
self,
block_engine,
block_size,
mac_engine,
mac_size,
mac_key,
etm=False,
aead=False,
iv_in=None,
):
"""
Switch inbound data cipher.
:param etm: Set encrypt-then-mac from OpenSSH
"""
self.__block_engine_in = block_engine
self.__block_size_in = block_size
self.__mac_engine_in = mac_engine
self.__mac_size_in = mac_size
self.__mac_key_in = mac_key
self.__received_bytes = 0
self.__received_packets = 0
self.__received_bytes_overflow = 0
self.__received_packets_overflow = 0
self.__etm_in = etm
self.__aead_in = aead
self.__iv_in = iv_in
# wait until the reset happens in both directions before clearing
# rekey flag
self.__init_count |= 2
if self.__init_count == 3:
self.__init_count = 0
self.__need_rekey = False
def set_outbound_compressor(self, compressor):
self.__compress_engine_out = compressor
def set_inbound_compressor(self, compressor):
self.__compress_engine_in = compressor
def close(self):
self.__closed = True
self.__socket.close()
def set_hexdump(self, hexdump):
self.__dump_packets = hexdump
def get_hexdump(self):
return self.__dump_packets
def get_mac_size_in(self):
return self.__mac_size_in
def get_mac_size_out(self):
return self.__mac_size_out
def need_rekey(self):
"""
Returns ``True`` if a new set of keys needs to be negotiated. This
will be triggered during a packet read or write, so it should be
checked after every read or write, or at least after every few.
"""
return self.__need_rekey
def set_keepalive(self, interval, callback):
"""
Turn on/off the callback keepalive. If ``interval`` seconds pass with
no data read from or written to the socket, the callback will be
executed and the timer will be reset.
"""
self.__keepalive_interval = interval
self.__keepalive_callback = callback
self.__keepalive_last = time.time()
def read_timer(self):
self.__timer_expired = True
def start_handshake(self, timeout):
"""
Tells `Packetizer` that the handshake process started.
Starts a bookkeeping timer that can signal a timeout in the
handshake process.
:param float timeout: number of seconds to wait before timing out
"""
if not self.__timer:
self.__timer = threading.Timer(float(timeout), self.read_timer)
self.__timer.start()
def handshake_timed_out(self):
"""
Checks if the handshake has timed out.
If `start_handshake` wasn't called before the call to this function,
the return value will always be `False`. If the handshake completed
before a timeout was reached, the return value will be `False`.
:return: handshake timeout status, as a `bool`
"""
if not self.__timer:
return False
if self.__handshake_complete:
return False
return self.__timer_expired
def complete_handshake(self):
"""
Tells `Packetizer` that the handshake has completed.
"""
if self.__timer:
self.__timer.cancel()
self.__timer_expired = False
self.__handshake_complete = True
def read_all(self, n, check_rekey=False):
"""
Read as close to N bytes as possible, blocking as long as necessary.
:param int n: number of bytes to read
:return: the data read, as `bytes`
:raises:
``EOFError`` -- if the socket was closed before all the bytes could
be read
"""
out = bytes()
# handle over-reading from reading the banner line
if len(self.__remainder) > 0:
out = self.__remainder[:n]
self.__remainder = self.__remainder[n:]
n -= len(out)
while n > 0:
got_timeout = False
if self.handshake_timed_out():
raise EOFError()
try:
x = self.__socket.recv(n)
if len(x) == 0:
raise EOFError()
out += x
n -= len(x)
except socket.timeout:
got_timeout = True
except socket.error as e:
# on Linux, sometimes instead of socket.timeout, we get
# EAGAIN. this is a bug in recent (> 2.6.9) kernels but
# we need to work around it.
arg = first_arg(e)
if arg == errno.EAGAIN:
got_timeout = True
elif self.__closed:
raise EOFError()
else:
raise
if got_timeout:
if self.__closed:
raise EOFError()
if check_rekey and (len(out) == 0) and self.__need_rekey:
raise NeedRekeyException()
self._check_keepalive()
return out
def write_all(self, out):
self.__keepalive_last = time.time()
iteration_with_zero_as_return_value = 0
while len(out) > 0:
retry_write = False
try:
n = self.__socket.send(out)
except socket.timeout:
retry_write = True
except socket.error as e:
arg = first_arg(e)
if arg == errno.EAGAIN:
retry_write = True
else:
n = -1
except ProxyCommandFailure:
raise # so it doesn't get swallowed by the below catchall
except Exception:
# could be: (32, 'Broken pipe')
n = -1
if retry_write:
n = 0
if self.__closed:
n = -1
else:
if n == 0 and iteration_with_zero_as_return_value > 10:
# We shouldn't retry the write, but we didn't
# manage to send anything over the socket. This might be an
# indication that we have lost contact with the remote
# side, but are yet to receive an EOFError or other socket
# errors. Let's give it some iteration to try and catch up.
n = -1
iteration_with_zero_as_return_value += 1
if n < 0:
raise EOFError()
if n == len(out):
break
out = out[n:]
return
def readline(self, timeout):
"""
Read a line from the socket. We assume no data is pending after the
line, so it's okay to attempt large reads.
"""
buf = self.__remainder
while linefeed_byte not in buf:
buf += self._read_timeout(timeout)
n = buf.index(linefeed_byte)
self.__remainder = buf[n + 1 :]
buf = buf[:n]
if (len(buf) > 0) and (buf[-1] == cr_byte_value):
buf = buf[:-1]
return u(buf)
def _inc_iv_counter(self, iv):
# Per https://www.rfc-editor.org/rfc/rfc5647.html#section-7.1 ,
# we increment the last 8 bytes of the 12-byte IV...
iv_counter_b = iv[4:]
iv_counter = int.from_bytes(iv_counter_b, "big")
inc_iv_counter = iv_counter + 1
inc_iv_counter_b = inc_iv_counter.to_bytes(8, "big")
# ...then re-concatenate it with the static first 4 bytes
new_iv = iv[0:4] + inc_iv_counter_b
return new_iv
def send_message(self, data):
"""
Write a block of data using the current cipher, as an SSH block.
"""
# encrypt this sucka
data = data.asbytes()
cmd = byte_ord(data[0])
if cmd in MSG_NAMES:
cmd_name = MSG_NAMES[cmd]
else:
cmd_name = "${:x}".format(cmd)
orig_len = len(data)
self.__write_lock.acquire()
try:
if self.__compress_engine_out is not None:
data = self.__compress_engine_out(data)
packet = self._build_packet(data)
if self.__dump_packets:
self._log(
DEBUG,
"Write packet <{}>, length {}".format(cmd_name, orig_len),
)
self._log(DEBUG, util.format_binary(packet, "OUT: "))
if self.__block_engine_out is not None:
if self.__etm_out:
# packet length is not encrypted in EtM
out = packet[0:4] + self.__block_engine_out.update(
packet[4:]
)
elif self.__aead_out:
# Packet-length field is used as the 'associated data'
# under AES-GCM, so like EtM, it's not encrypted. See
# https://www.rfc-editor.org/rfc/rfc5647#section-7.3
out = packet[0:4] + self.__block_engine_out.encrypt(
self.__iv_out, packet[4:], packet[0:4]
)
self.__iv_out = self._inc_iv_counter(self.__iv_out)
else:
out = self.__block_engine_out.update(packet)
else:
out = packet
# Append a MAC when needed (eg, not under AES-GCM)
if self.__block_engine_out is not None and not self.__aead_out:
packed = struct.pack(">I", self.__sequence_number_out)
payload = packed + (out if self.__etm_out else packet)
out += compute_hmac(
self.__mac_key_out, payload, self.__mac_engine_out
)[: self.__mac_size_out]
next_seq = (self.__sequence_number_out + 1) & xffffffff
if next_seq == 0 and not self._initial_kex_done:
raise SSHException(
"Sequence number rolled over during initial kex!"
)
self.__sequence_number_out = next_seq
self.write_all(out)
self.__sent_bytes += len(out)
self.__sent_packets += 1
sent_too_much = (
self.__sent_packets >= self.REKEY_PACKETS
or self.__sent_bytes >= self.REKEY_BYTES
)
if sent_too_much and not self.__need_rekey:
# only ask once for rekeying
msg = "Rekeying (hit {} packets, {} bytes sent)"
self._log(
DEBUG, msg.format(self.__sent_packets, self.__sent_bytes)
)
self.__received_bytes_overflow = 0
self.__received_packets_overflow = 0
self._trigger_rekey()
finally:
self.__write_lock.release()
def read_message(self):
"""
Only one thread should ever be in this function (no other locking is
done).
:raises: `.SSHException` -- if the packet is mangled
:raises: `.NeedRekeyException` -- if the transport should rekey
"""
header = self.read_all(self.__block_size_in, check_rekey=True)
if self.__etm_in:
packet_size = struct.unpack(">I", header[:4])[0]
remaining = packet_size - self.__block_size_in + 4
packet = header[4:] + self.read_all(remaining, check_rekey=False)
mac = self.read_all(self.__mac_size_in, check_rekey=False)
mac_payload = (
struct.pack(">II", self.__sequence_number_in, packet_size)
+ packet
)
my_mac = compute_hmac(
self.__mac_key_in, mac_payload, self.__mac_engine_in
)[: self.__mac_size_in]
if not util.constant_time_bytes_eq(my_mac, mac):
raise SSHException("Mismatched MAC")
header = packet
if self.__aead_in:
# Grab unencrypted (considered 'additional data' under GCM) packet
# length.
packet_size = struct.unpack(">I", header[:4])[0]
aad = header[:4]
remaining = (
packet_size - self.__block_size_in + 4 + self.__mac_size_in
)
packet = header[4:] + self.read_all(remaining, check_rekey=False)
header = self.__block_engine_in.decrypt(self.__iv_in, packet, aad)
self.__iv_in = self._inc_iv_counter(self.__iv_in)
if self.__block_engine_in is not None and not self.__aead_in:
header = self.__block_engine_in.update(header)
if self.__dump_packets:
self._log(DEBUG, util.format_binary(header, "IN: "))
# When ETM or AEAD (GCM) are in use, we've already read the packet size
# & decrypted everything, so just set the packet back to the header we
# obtained.
if self.__etm_in or self.__aead_in:
packet = header
# Otherwise, use the older non-ETM logic
else:
packet_size = struct.unpack(">I", header[:4])[0]
# leftover contains decrypted bytes from the first block (after the
# length field)
leftover = header[4:]
if (packet_size - len(leftover)) % self.__block_size_in != 0:
raise SSHException("Invalid packet blocking")
buf = self.read_all(
packet_size + self.__mac_size_in - len(leftover)
)
packet = buf[: packet_size - len(leftover)]
post_packet = buf[packet_size - len(leftover) :]
if self.__block_engine_in is not None:
packet = self.__block_engine_in.update(packet)
packet = leftover + packet
if self.__dump_packets:
self._log(DEBUG, util.format_binary(packet, "IN: "))
if self.__mac_size_in > 0 and not self.__etm_in and not self.__aead_in:
mac = post_packet[: self.__mac_size_in]
mac_payload = (
struct.pack(">II", self.__sequence_number_in, packet_size)
+ packet
)
my_mac = compute_hmac(
self.__mac_key_in, mac_payload, self.__mac_engine_in
)[: self.__mac_size_in]
if not util.constant_time_bytes_eq(my_mac, mac):
raise SSHException("Mismatched MAC")
padding = byte_ord(packet[0])
payload = packet[1 : packet_size - padding]
if self.__dump_packets:
self._log(
DEBUG,
"Got payload ({} bytes, {} padding)".format(
packet_size, padding
),
)
if self.__compress_engine_in is not None:
payload = self.__compress_engine_in(payload)
msg = Message(payload[1:])
msg.seqno = self.__sequence_number_in
next_seq = (self.__sequence_number_in + 1) & xffffffff
if next_seq == 0 and not self._initial_kex_done:
raise SSHException(
"Sequence number rolled over during initial kex!"
)
self.__sequence_number_in = next_seq
# check for rekey
raw_packet_size = packet_size + self.__mac_size_in + 4
self.__received_bytes += raw_packet_size
self.__received_packets += 1
if self.__need_rekey:
# we've asked to rekey -- give them some packets to comply before
# dropping the connection
self.__received_bytes_overflow += raw_packet_size
self.__received_packets_overflow += 1
if (
self.__received_packets_overflow
>= self.REKEY_PACKETS_OVERFLOW_MAX
) or (
self.__received_bytes_overflow >= self.REKEY_BYTES_OVERFLOW_MAX
):
raise SSHException(
"Remote transport is ignoring rekey requests"
)
elif (self.__received_packets >= self.REKEY_PACKETS) or (
self.__received_bytes >= self.REKEY_BYTES
):
# only ask once for rekeying
err = "Rekeying (hit {} packets, {} bytes received)"
self._log(
DEBUG,
err.format(self.__received_packets, self.__received_bytes),
)
self.__received_bytes_overflow = 0
self.__received_packets_overflow = 0
self._trigger_rekey()
cmd = byte_ord(payload[0])
if cmd in MSG_NAMES:
cmd_name = MSG_NAMES[cmd]
else:
cmd_name = "${:x}".format(cmd)
if self.__dump_packets:
self._log(
DEBUG,
"Read packet <{}>, length {}".format(cmd_name, len(payload)),
)
return cmd, msg
# ...protected...
def _log(self, level, msg):
if self.__logger is None:
return
if issubclass(type(msg), list):
for m in msg:
self.__logger.log(level, m)
else:
self.__logger.log(level, msg)
def _check_keepalive(self):
if (
not self.__keepalive_interval
or not self.__block_engine_out
or self.__need_rekey
):
# wait till we're encrypting, and not in the middle of rekeying
return
now = time.time()
if now > self.__keepalive_last + self.__keepalive_interval:
self.__keepalive_callback()
self.__keepalive_last = now
def _read_timeout(self, timeout):
start = time.time()
while True:
try:
x = self.__socket.recv(128)
if len(x) == 0:
raise EOFError()
break
except socket.timeout:
pass
if self.__closed:
raise EOFError()
now = time.time()
if now - start >= timeout:
raise socket.timeout()
return x
def _build_packet(self, payload):
# pad up at least 4 bytes, to nearest block-size (usually 8)
bsize = self.__block_size_out
# do not include payload length in computations for padding in EtM mode
# (payload length won't be encrypted)
addlen = 4 if self.__etm_out or self.__aead_out else 8
padding = 3 + bsize - ((len(payload) + addlen) % bsize)
packet = struct.pack(">IB", len(payload) + padding + 1, padding)
packet += payload
if self.__sdctr_out or self.__block_engine_out is None:
# cute trick i caught openssh doing: if we're not encrypting or
# SDCTR mode (RFC4344),
# don't waste random bytes for the padding
packet += zero_byte * padding
else:
packet += os.urandom(padding)
return packet
def _trigger_rekey(self):
# outside code should check for this flag
self.__need_rekey = True
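# ---------------------------------------------------------------------------
# Standalone sketch of the framing rule used by _build_packet above: the
# packet (length field + padding-count byte + payload + padding, before any
# MAC) must be a multiple of the cipher block size, with at least 4 bytes of
# padding. framed_length is a demo helper that mirrors, not replaces, the
# private method.
if __name__ == "__main__":

    def framed_length(payload_len, block_size=8, addlen=8):
        padding = 3 + block_size - ((payload_len + addlen) % block_size)
        return 4 + 1 + payload_len + padding, padding

    total, padding = framed_length(12)
    assert padding >= 4
    assert total % 8 == 0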

View File

@ -0,0 +1,148 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Abstraction of a one-way pipe where the read end can be used in
`select.select`. Normally this is trivial, but Windows makes it nearly
impossible.
The pipe acts like an Event, which can be set or cleared. When set, the pipe
will trigger as readable in `select <select.select>`.
"""
import sys
import os
import socket
def make_pipe():
if sys.platform[:3] != "win":
p = PosixPipe()
else:
p = WindowsPipe()
return p
class PosixPipe:
def __init__(self):
self._rfd, self._wfd = os.pipe()
self._set = False
self._forever = False
self._closed = False
def close(self):
os.close(self._rfd)
os.close(self._wfd)
# used for unit tests:
self._closed = True
def fileno(self):
return self._rfd
def clear(self):
if not self._set or self._forever:
return
os.read(self._rfd, 1)
self._set = False
def set(self):
if self._set or self._closed:
return
self._set = True
os.write(self._wfd, b"*")
def set_forever(self):
self._forever = True
self.set()
class WindowsPipe:
"""
On Windows, only an OS-level "WinSock" may be used in select(), but reads
and writes must be to the actual socket object.
"""
def __init__(self):
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv.bind(("127.0.0.1", 0))
serv.listen(1)
# need to save sockets in _rsock/_wsock so they don't get closed
self._rsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._rsock.connect(("127.0.0.1", serv.getsockname()[1]))
self._wsock, addr = serv.accept()
serv.close()
self._set = False
self._forever = False
self._closed = False
def close(self):
self._rsock.close()
self._wsock.close()
# used for unit tests:
self._closed = True
def fileno(self):
return self._rsock.fileno()
def clear(self):
if not self._set or self._forever:
return
self._rsock.recv(1)
self._set = False
def set(self):
if self._set or self._closed:
return
self._set = True
self._wsock.send(b"*")
def set_forever(self):
self._forever = True
self.set()
class OrPipe:
def __init__(self, pipe):
self._set = False
self._partner = None
self._pipe = pipe
def set(self):
self._set = True
if not self._partner._set:
self._pipe.set()
def clear(self):
self._set = False
if not self._partner._set:
self._pipe.clear()
def make_or_pipe(pipe):
"""
wraps a pipe into two pipe-like objects which are "or"d together to
affect the real pipe. if either returned pipe is set, the wrapped pipe
is set. when both are cleared, the wrapped pipe is cleared.
"""
p1 = OrPipe(pipe)
p2 = OrPipe(pipe)
p1._partner = p2
p2._partner = p1
return p1, p2
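# ---------------------------------------------------------------------------
# Hedged usage sketch for the pipes above: a pipe from make_pipe() becomes
# readable for select() once set(), and stops being readable after clear().
# The same behavior holds for both the Posix and Windows implementations.
if __name__ == "__main__":
    import select

    p = make_pipe()
    assert select.select([p.fileno()], [], [], 0)[0] == []
    p.set()
    assert select.select([p.fileno()], [], [], 0)[0] == [p.fileno()]
    p.clear()
    assert select.select([p.fileno()], [], [], 0)[0] == []
    p.close()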

View File

@ -0,0 +1,957 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Common API for all public keys.
"""
import base64
from base64 import encodebytes, decodebytes
from binascii import unhexlify
import os
from pathlib import Path
from hashlib import md5, sha256
import re
import struct
import bcrypt
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import padding, serialization
from cryptography.hazmat.primitives.ciphers import algorithms, modes, Cipher
from cryptography.hazmat.primitives import asymmetric
from paramiko import util
from paramiko.util import u, b
from paramiko.common import o600
from paramiko.ssh_exception import SSHException, PasswordRequiredException
from paramiko.message import Message
# TripleDES is moving from `cryptography.hazmat.primitives.ciphers.algorithms`
# in cryptography>=43.0.0 to `cryptography.hazmat.decrepit.ciphers.algorithms`
# It will be removed from `cryptography.hazmat.primitives.ciphers.algorithms`
# in cryptography==48.0.0.
#
# Source References:
# - https://github.com/pyca/cryptography/commit/722a6393e61b3ac
# - https://github.com/pyca/cryptography/pull/11407/files
try:
from cryptography.hazmat.decrepit.ciphers.algorithms import TripleDES
except ImportError:
from cryptography.hazmat.primitives.ciphers.algorithms import TripleDES
OPENSSH_AUTH_MAGIC = b"openssh-key-v1\x00"
def _unpad_openssh(data):
# At the moment, this is only used for unpadding private keys on disk. This
# really ought to be made constant time (possibly by upstreaming this logic
# into pyca/cryptography).
padding_length = data[-1]
if 0x20 <= padding_length < 0x7F:
return data  # no padding; last byte is part of the comment (printable ASCII)
if padding_length > 15:
raise SSHException("Invalid key")
for i in range(padding_length):
if data[i - padding_length] != i + 1:
raise SSHException("Invalid key")
return data[:-padding_length]
class UnknownKeyType(Exception):
"""
An unknown public/private key algorithm was attempted to be read.
"""
def __init__(self, key_type=None, key_bytes=None):
self.key_type = key_type
self.key_bytes = key_bytes
def __str__(self):
return f"UnknownKeyType(type={self.key_type!r}, bytes=<{len(self.key_bytes)}>)" # noqa
class PKey:
"""
Base class for public keys.
Also includes some "meta" level convenience constructors such as
`.from_type_string`.
"""
# known encryption types for private key files:
_CIPHER_TABLE = {
"AES-128-CBC": {
"cipher": algorithms.AES,
"keysize": 16,
"blocksize": 16,
"mode": modes.CBC,
},
"AES-256-CBC": {
"cipher": algorithms.AES,
"keysize": 32,
"blocksize": 16,
"mode": modes.CBC,
},
"DES-EDE3-CBC": {
"cipher": TripleDES,
"keysize": 24,
"blocksize": 8,
"mode": modes.CBC,
},
}
_PRIVATE_KEY_FORMAT_ORIGINAL = 1
_PRIVATE_KEY_FORMAT_OPENSSH = 2
BEGIN_TAG = re.compile(
r"^-{5}BEGIN (RSA|DSA|EC|OPENSSH) PRIVATE KEY-{5}\s*$"
)
END_TAG = re.compile(r"^-{5}END (RSA|DSA|EC|OPENSSH) PRIVATE KEY-{5}\s*$")
@staticmethod
def from_path(path, passphrase=None):
"""
Attempt to instantiate appropriate key subclass from given file path.
:param Path path: The path to load (may also be a `str`).
:returns:
A `PKey` subclass instance.
:raises:
`UnknownKeyType`, if our crypto backend doesn't know this key type.
.. versionadded:: 3.2
"""
# TODO: make sure sphinx is reading Path right in param list...
# Lazy import to avoid circular import issues
from paramiko import DSSKey, RSAKey, Ed25519Key, ECDSAKey
# Normalize to string, as cert suffix isn't quite an extension, so
# pathlib isn't useful for this.
path = str(path)
# Sort out cert vs key, i.e. it is 'legal' to hand this kind of API
# /either/ the key /or/ the cert, when there is a key/cert pair.
cert_suffix = "-cert.pub"
if str(path).endswith(cert_suffix):
key_path = path[: -len(cert_suffix)]
cert_path = path
else:
key_path = path
cert_path = path + cert_suffix
key_path = Path(key_path).expanduser()
cert_path = Path(cert_path).expanduser()
data = key_path.read_bytes()
# Like OpenSSH, try modern/OpenSSH-specific key load first
try:
loaded = serialization.load_ssh_private_key(
data=data, password=passphrase
)
# Then fall back to assuming legacy PEM type
except ValueError:
loaded = serialization.load_pem_private_key(
data=data, password=passphrase
)
# TODO Python 3.10: match statement? (NOTE: we cannot use a dict
# because the results from the loader are literal backend, eg openssl,
# private classes, so isinstance tests work but exact 'x class is y'
# tests will not work)
# TODO: leverage already-parsed/math'd obj to avoid duplicate cpu
# cycles? seemingly requires most of our key subclasses to be rewritten
# to be cryptography-object-forward. this is still likely faster than
# the old SSHClient code that just tried instantiating every class!
key_class = None
if isinstance(loaded, asymmetric.dsa.DSAPrivateKey):
key_class = DSSKey
elif isinstance(loaded, asymmetric.rsa.RSAPrivateKey):
key_class = RSAKey
elif isinstance(loaded, asymmetric.ed25519.Ed25519PrivateKey):
key_class = Ed25519Key
elif isinstance(loaded, asymmetric.ec.EllipticCurvePrivateKey):
key_class = ECDSAKey
else:
raise UnknownKeyType(key_bytes=data, key_type=loaded.__class__)
with key_path.open() as fd:
key = key_class.from_private_key(fd, password=passphrase)
if cert_path.exists():
# load_certificate can take Message, path-str, or value-str
key.load_certificate(str(cert_path))
return key
@staticmethod
def from_type_string(key_type, key_bytes):
"""
Given type `str` & raw `bytes`, return a `PKey` subclass instance.
For example, ``PKey.from_type_string("ssh-ed25519", <public bytes>)``
will (if successful) return a new `.Ed25519Key`.
:param str key_type:
The key type, eg ``"ssh-ed25519"``.
:param bytes key_bytes:
The raw byte data forming the key material, as expected by
subclasses' ``data`` parameter.
:returns:
A `PKey` subclass instance.
:raises:
`UnknownKeyType`, if no registered classes knew about this type.
.. versionadded:: 3.2
"""
from paramiko import key_classes
for key_class in key_classes:
if key_type in key_class.identifiers():
# TODO: needs to passthru things like passphrase
return key_class(data=key_bytes)
raise UnknownKeyType(key_type=key_type, key_bytes=key_bytes)
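    # Sketch of `from_type_string` usage: given a type string and the raw
    # public blob (taken here from an existing key object purely for
    # illustration), rebuild a typed key instance without knowing the
    # concrete subclass up front:
    #
    #     blob = existing_ed25519_key.asbytes()
    #     key = PKey.from_type_string("ssh-ed25519", blob)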
@classmethod
def identifiers(cls):
"""
        Return an iterable of key format/name strings this class can handle.
Most classes only have a single identifier, and thus this default
implementation suffices; see `.ECDSAKey` for one example of an
override.
"""
return [cls.name]
# TODO 4.0: make this and subclasses consistent, some of our own
# classmethods even assume kwargs we don't define!
# TODO 4.0: prob also raise NotImplementedError instead of pass'ing; the
# contract is pretty obviously that you need to handle msg/data/filename
# appropriately. (If 'pass' is a concession to testing, see about doing the
# work to fix the tests instead)
def __init__(self, msg=None, data=None):
"""
Create a new instance of this public key type. If ``msg`` is given,
the key's public part(s) will be filled in from the message. If
``data`` is given, the key's public part(s) will be filled in from
the string.
:param .Message msg:
an optional SSH `.Message` containing a public key of this type.
:param bytes data:
optional, the bytes of a public key of this type
:raises: `.SSHException` --
if a key cannot be created from the ``data`` or ``msg`` given, or
no key was passed in.
"""
pass
# TODO: arguably this might want to be __str__ instead? ehh
# TODO: ditto the interplay between showing class name (currently we just
# say PKey writ large) and algorithm (usually == class name, but not
# always, also sometimes shows certificate-ness)
# TODO: if we do change it, we also want to tweak eg AgentKey, as it
# currently displays agent-ness with a suffix
def __repr__(self):
comment = ""
# Works for AgentKey, may work for others?
if hasattr(self, "comment") and self.comment:
comment = f", comment={self.comment!r}"
return f"PKey(alg={self.algorithm_name}, bits={self.get_bits()}, fp={self.fingerprint}{comment})" # noqa
# TODO 4.0: just merge into __bytes__ (everywhere)
def asbytes(self):
"""
Return a string of an SSH `.Message` made up of the public part(s) of
this key. This string is suitable for passing to `__init__` to
re-create the key object later.
"""
return bytes()
def __bytes__(self):
return self.asbytes()
def __eq__(self, other):
return isinstance(other, PKey) and self._fields == other._fields
def __hash__(self):
return hash(self._fields)
@property
def _fields(self):
raise NotImplementedError
def get_name(self):
"""
Return the name of this private key implementation.
:return:
name of this private key type, in SSH terminology, as a `str` (for
example, ``"ssh-rsa"``).
"""
return ""
@property
def algorithm_name(self):
"""
Return the key algorithm identifier for this key.
Similar to `get_name`, but aimed at pure algorithm name instead of SSH
protocol field value.
"""
# Nuke the leading 'ssh-'
# TODO in Python 3.9: use .removeprefix()
name = self.get_name().replace("ssh-", "")
# Trim any cert suffix (but leave the -cert, as OpenSSH does)
cert_tail = "-cert-v01@openssh.com"
if cert_tail in name:
name = name.replace(cert_tail, "-cert")
# Nuke any eg ECDSA suffix, OpenSSH does basically this too.
else:
name = name.split("-")[0]
return name.upper()
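    # A few concrete mappings, per the logic above: "ssh-ed25519" -> "ED25519",
    # "ecdsa-sha2-nistp256" -> "ECDSA", and
    # "ssh-rsa-cert-v01@openssh.com" -> "RSA-CERT".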
def get_bits(self):
"""
Return the number of significant bits in this key. This is useful
for judging the relative security of a key.
:return: bits in the key (as an `int`)
"""
# TODO 4.0: raise NotImplementedError, 0 is unlikely to ever be
# _correct_ and nothing in the critical path seems to use this.
return 0
def can_sign(self):
"""
Return ``True`` if this key has the private part necessary for signing
data.
"""
return False
def get_fingerprint(self):
"""
Return an MD5 fingerprint of the public part of this key. Nothing
secret is revealed.
:return:
a 16-byte `string <str>` (binary) of the MD5 fingerprint, in SSH
format.
"""
return md5(self.asbytes()).digest()
@property
def fingerprint(self):
"""
Modern fingerprint property designed to be comparable to OpenSSH.
Currently only does SHA256 (the OpenSSH default).
.. versionadded:: 3.2
"""
hashy = sha256(bytes(self))
hash_name = hashy.name.upper()
b64ed = encodebytes(hashy.digest())
cleaned = u(b64ed).strip().rstrip("=") # yes, OpenSSH does this too!
return f"{hash_name}:{cleaned}"
def get_base64(self):
"""
Return a base64 string containing the public part of this key. Nothing
secret is revealed. This format is compatible with that used to store
public key files or recognized host keys.
:return: a base64 `string <str>` containing the public part of the key.
"""
return u(encodebytes(self.asbytes())).replace("\n", "")
def sign_ssh_data(self, data, algorithm=None):
"""
Sign a blob of data with this private key, and return a `.Message`
representing an SSH signature message.
:param bytes data:
the data to sign.
:param str algorithm:
the signature algorithm to use, if different from the key's
internal name. Default: ``None``.
:return: an SSH signature `message <.Message>`.
.. versionchanged:: 2.9
Added the ``algorithm`` kwarg.
"""
return bytes()
def verify_ssh_sig(self, data, msg):
"""
Given a blob of data, and an SSH message representing a signature of
that data, verify that it was signed with this key.
:param bytes data: the data that was signed.
:param .Message msg: an SSH signature message
:return:
``True`` if the signature verifies correctly; ``False`` otherwise.
"""
return False
@classmethod
def from_private_key_file(cls, filename, password=None):
"""
Create a key object by reading a private key file. If the private
key is encrypted and ``password`` is not ``None``, the given password
will be used to decrypt the key (otherwise `.PasswordRequiredException`
is thrown). Through the magic of Python, this factory method will
exist in all subclasses of PKey (such as `.RSAKey` or `.DSSKey`), but
is useless on the abstract PKey class.
:param str filename: name of the file to read
:param str password:
an optional password to use to decrypt the key file, if it's
encrypted
:return: a new `.PKey` based on the given private key
:raises: ``IOError`` -- if there was an error reading the file
:raises: `.PasswordRequiredException` -- if the private key file is
encrypted, and ``password`` is ``None``
:raises: `.SSHException` -- if the key file is invalid
"""
key = cls(filename=filename, password=password)
return key
@classmethod
def from_private_key(cls, file_obj, password=None):
"""
Create a key object by reading a private key from a file (or file-like)
object. If the private key is encrypted and ``password`` is not
``None``, the given password will be used to decrypt the key (otherwise
`.PasswordRequiredException` is thrown).
:param file_obj: the file-like object to read from
:param str password:
an optional password to use to decrypt the key, if it's encrypted
:return: a new `.PKey` based on the given private key
:raises: ``IOError`` -- if there was an error reading the key
:raises: `.PasswordRequiredException` --
if the private key file is encrypted, and ``password`` is ``None``
:raises: `.SSHException` -- if the key file is invalid
"""
key = cls(file_obj=file_obj, password=password)
return key
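    # Sketch of loading from an in-memory file-like object instead of a path,
    # e.g. when key material comes from a vault or environment variable;
    # ``key_text`` is a hypothetical string holding the private key file's
    # contents:
    #
    #     from io import StringIO
    #     key = Ed25519Key.from_private_key(StringIO(key_text))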
def write_private_key_file(self, filename, password=None):
"""
Write private key contents into a file. If the password is not
``None``, the key is encrypted before writing.
:param str filename: name of the file to write
:param str password:
an optional password to use to encrypt the key file
:raises: ``IOError`` -- if there was an error writing the file
:raises: `.SSHException` -- if the key is invalid
"""
raise Exception("Not implemented in PKey")
def write_private_key(self, file_obj, password=None):
"""
Write private key contents into a file (or file-like) object. If the
password is not ``None``, the key is encrypted before writing.
:param file_obj: the file-like object to write into
:param str password: an optional password to use to encrypt the key
:raises: ``IOError`` -- if there was an error writing to the file
:raises: `.SSHException` -- if the key is invalid
"""
# TODO 4.0: NotImplementedError (plus everywhere else in here)
raise Exception("Not implemented in PKey")
def _read_private_key_file(self, tag, filename, password=None):
"""
Read an SSH2-format private key file, looking for a string of the type
``"BEGIN xxx PRIVATE KEY"`` for some ``xxx``, base64-decode the text we
find, and return it as a string. If the private key is encrypted and
``password`` is not ``None``, the given password will be used to
decrypt the key (otherwise `.PasswordRequiredException` is thrown).
:param str tag: ``"RSA"`` or ``"DSA"``, the tag used to mark the
data block.
:param str filename: name of the file to read.
:param str password:
an optional password to use to decrypt the key file, if it's
encrypted.
:return: the `bytes` that make up the private key.
:raises: ``IOError`` -- if there was an error reading the file.
:raises: `.PasswordRequiredException` -- if the private key file is
encrypted, and ``password`` is ``None``.
:raises: `.SSHException` -- if the key file is invalid.
"""
with open(filename, "r") as f:
data = self._read_private_key(tag, f, password)
return data
def _read_private_key(self, tag, f, password=None):
lines = f.readlines()
if not lines:
raise SSHException("no lines in {} private key file".format(tag))
# find the BEGIN tag
start = 0
m = self.BEGIN_TAG.match(lines[start])
line_range = len(lines) - 1
while start < line_range and not m:
start += 1
m = self.BEGIN_TAG.match(lines[start])
start += 1
keytype = m.group(1) if m else None
if start >= len(lines) or keytype is None:
raise SSHException("not a valid {} private key file".format(tag))
# find the END tag
end = start
m = self.END_TAG.match(lines[end])
while end < line_range and not m:
end += 1
m = self.END_TAG.match(lines[end])
if keytype == tag:
data = self._read_private_key_pem(lines, end, password)
pkformat = self._PRIVATE_KEY_FORMAT_ORIGINAL
elif keytype == "OPENSSH":
data = self._read_private_key_openssh(lines[start:end], password)
pkformat = self._PRIVATE_KEY_FORMAT_OPENSSH
else:
raise SSHException(
"encountered {} key, expected {} key".format(keytype, tag)
)
return pkformat, data
def _got_bad_key_format_id(self, id_):
err = "{}._read_private_key() spat out an unknown key format id '{}'"
raise SSHException(err.format(self.__class__.__name__, id_))
def _read_private_key_pem(self, lines, end, password):
start = 0
# parse any headers first
headers = {}
start += 1
while start < len(lines):
line = lines[start].split(": ")
if len(line) == 1:
break
headers[line[0].lower()] = line[1].strip()
start += 1
# if we trudged to the end of the file, just try to cope.
try:
data = decodebytes(b("".join(lines[start:end])))
except base64.binascii.Error as e:
raise SSHException("base64 decoding error: {}".format(e))
if "proc-type" not in headers:
            # unencrypted: done
return data
# encrypted keyfile: will need a password
proc_type = headers["proc-type"]
if proc_type != "4,ENCRYPTED":
raise SSHException(
'Unknown private key structure "{}"'.format(proc_type)
)
try:
encryption_type, saltstr = headers["dek-info"].split(",")
except:
raise SSHException("Can't parse DEK-info in private key file")
if encryption_type not in self._CIPHER_TABLE:
raise SSHException(
'Unknown private key cipher "{}"'.format(encryption_type)
)
# if no password was passed in,
# raise an exception pointing out that we need one
if password is None:
raise PasswordRequiredException("Private key file is encrypted")
cipher = self._CIPHER_TABLE[encryption_type]["cipher"]
keysize = self._CIPHER_TABLE[encryption_type]["keysize"]
mode = self._CIPHER_TABLE[encryption_type]["mode"]
salt = unhexlify(b(saltstr))
key = util.generate_key_bytes(md5, salt, password, keysize)
decryptor = Cipher(
cipher(key), mode(salt), backend=default_backend()
).decryptor()
decrypted_data = decryptor.update(data) + decryptor.finalize()
unpadder = padding.PKCS7(cipher.block_size).unpadder()
try:
return unpadder.update(decrypted_data) + unpadder.finalize()
except ValueError:
raise SSHException("Bad password or corrupt private key file")
def _read_private_key_openssh(self, lines, password):
"""
Read the new OpenSSH SSH2 private key format available
since OpenSSH version 6.5
Reference:
https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
"""
try:
data = decodebytes(b("".join(lines)))
except base64.binascii.Error as e:
raise SSHException("base64 decoding error: {}".format(e))
# read data struct
auth_magic = data[:15]
if auth_magic != OPENSSH_AUTH_MAGIC:
raise SSHException("unexpected OpenSSH key header encountered")
cstruct = self._uint32_cstruct_unpack(data[15:], "sssur")
cipher, kdfname, kdf_options, num_pubkeys, remainder = cstruct
# For now, just support 1 key.
if num_pubkeys > 1:
raise SSHException(
"unsupported: private keyfile has multiple keys"
)
pubkey, privkey_blob = self._uint32_cstruct_unpack(remainder, "ss")
if kdfname == b("bcrypt"):
if cipher == b("aes256-cbc"):
mode = modes.CBC
elif cipher == b("aes256-ctr"):
mode = modes.CTR
else:
raise SSHException(
"unknown cipher `{}` used in private key file".format(
cipher.decode("utf-8")
)
)
# Encrypted private key.
# If no password was passed in, raise an exception pointing
# out that we need one
if password is None:
raise PasswordRequiredException(
"private key file is encrypted"
)
# Unpack salt and rounds from kdfoptions
salt, rounds = self._uint32_cstruct_unpack(kdf_options, "su")
# run bcrypt kdf to derive key and iv/nonce (32 + 16 bytes)
key_iv = bcrypt.kdf(
b(password),
b(salt),
48,
rounds,
# We can't control how many rounds are on disk, so no sense
# warning about it.
ignore_few_rounds=True,
)
key = key_iv[:32]
iv = key_iv[32:]
# decrypt private key blob
decryptor = Cipher(
algorithms.AES(key), mode(iv), default_backend()
).decryptor()
decrypted_privkey = decryptor.update(privkey_blob)
decrypted_privkey += decryptor.finalize()
elif cipher == b("none") and kdfname == b("none"):
# Unencrypted private key
decrypted_privkey = privkey_blob
else:
raise SSHException(
"unknown cipher or kdf used in private key file"
)
# Unpack private key and verify checkints
cstruct = self._uint32_cstruct_unpack(decrypted_privkey, "uusr")
checkint1, checkint2, keytype, keydata = cstruct
if checkint1 != checkint2:
raise SSHException(
"OpenSSH private key file checkints do not match"
)
return _unpad_openssh(keydata)
def _uint32_cstruct_unpack(self, data, strformat):
"""
Used to read new OpenSSH private key format.
Unpacks a c data structure containing a mix of 32-bit uints and
variable length strings prefixed by 32-bit uint size field,
according to the specified format. Returns the unpacked vars
in a tuple.
Format strings:
s - denotes a string
i - denotes a long integer, encoded as a byte string
u - denotes a 32-bit unsigned integer
r - the remainder of the input string, returned as a string
"""
arr = []
idx = 0
try:
for f in strformat:
if f == "s":
# string
s_size = struct.unpack(">L", data[idx : idx + 4])[0]
idx += 4
s = data[idx : idx + s_size]
idx += s_size
arr.append(s)
if f == "i":
# long integer
s_size = struct.unpack(">L", data[idx : idx + 4])[0]
idx += 4
s = data[idx : idx + s_size]
idx += s_size
i = util.inflate_long(s, True)
arr.append(i)
elif f == "u":
# 32-bit unsigned int
u = struct.unpack(">L", data[idx : idx + 4])[0]
idx += 4
arr.append(u)
elif f == "r":
# remainder as string
s = data[idx:]
arr.append(s)
break
except Exception as e:
# PKey-consuming code frequently wants to save-and-skip-over issues
# with loading keys, and uses SSHException as the (really friggin
# awful) signal for this. So for now...we do this.
raise SSHException(str(e))
return tuple(arr)
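    # Worked example of the format codes above: unpacking
    # b"\x00\x00\x00\x03abc" with "sr" yields (b"abc", b"") -- one 3-byte
    # length-prefixed string followed by the (empty) remainder. The OpenSSH
    # header parsing above relies on exactly this layout ("sssur": cipher,
    # kdf name, kdf options, key count, rest).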
def _write_private_key_file(self, filename, key, format, password=None):
"""
Write an SSH2-format private key file in a form that can be read by
        paramiko or openssh. If no password is given, the key is written
        unencrypted (base64-encoded but otherwise unprotected). If a password
        is given, the strongest encryption supported by the backend is used.
        :param str filename: name of the file to write.
        :param key: the underlying ``cryptography`` private key object.
        :param format: the ``cryptography`` serialization format to use.
:param str password: an optional password to use to encrypt the file.
:raises: ``IOError`` -- if there was an error writing the file.
"""
# Ensure that we create new key files directly with a user-only mode,
# instead of opening, writing, then chmodding, which leaves us open to
# CVE-2022-24302.
with os.fdopen(
os.open(
filename,
# NOTE: O_TRUNC is a noop on new files, and O_CREAT is a noop
# on existing files, so using all 3 in both cases is fine.
flags=os.O_WRONLY | os.O_TRUNC | os.O_CREAT,
# Ditto the use of the 'mode' argument; it should be safe to
# give even for existing files (though it will not act like a
# chmod in that case).
mode=o600,
),
# Yea, you still gotta inform the FLO that it is in "write" mode.
"w",
) as f:
self._write_private_key(f, key, format, password=password)
def _write_private_key(self, f, key, format, password=None):
if password is None:
encryption = serialization.NoEncryption()
else:
encryption = serialization.BestAvailableEncryption(b(password))
f.write(
key.private_bytes(
serialization.Encoding.PEM, format, encryption
).decode()
)
def _check_type_and_load_cert(self, msg, key_type, cert_type):
"""
Perform message type-checking & optional certificate loading.
This includes fast-forwarding cert ``msg`` objects past the nonce, so
that the subsequent fields are the key numbers; thus the caller may
expect to treat the message as key material afterwards either way.
The obtained key type is returned for classes which need to know what
it was (e.g. ECDSA.)
"""
# Normalization; most classes have a single key type and give a string,
# but eg ECDSA is a 1:N mapping.
key_types = key_type
cert_types = cert_type
if isinstance(key_type, str):
key_types = [key_types]
if isinstance(cert_types, str):
cert_types = [cert_types]
# Can't do much with no message, that should've been handled elsewhere
if msg is None:
raise SSHException("Key object may not be empty")
# First field is always key type, in either kind of object. (make sure
# we rewind before grabbing it - sometimes caller had to do their own
# introspection first!)
msg.rewind()
type_ = msg.get_text()
# Regular public key - nothing special to do besides the implicit
# type check.
if type_ in key_types:
pass
# OpenSSH-compatible certificate - store full copy as .public_blob
# (so signing works correctly) and then fast-forward past the
# nonce.
elif type_ in cert_types:
# This seems the cleanest way to 'clone' an already-being-read
# message; they're *IO objects at heart and their .getvalue()
# always returns the full value regardless of pointer position.
self.load_certificate(Message(msg.asbytes()))
# Read out nonce as it comes before the public numbers - our caller
# is likely going to use the (only borrowed by us, not owned)
# 'msg' object for loading those numbers right after this.
# TODO: usefully interpret it & other non-public-number fields
# (requires going back into per-type subclasses.)
msg.get_string()
else:
err = "Invalid key (class: {}, data type: {}"
raise SSHException(err.format(self.__class__.__name__, type_))
def load_certificate(self, value):
"""
Supplement the private key contents with data loaded from an OpenSSH
public key (``.pub``) or certificate (``-cert.pub``) file, a string
containing such a file, or a `.Message` object.
        The ``.pub`` contents add no real value, since the private key
        file includes sufficient information to derive the public
        key info. For certificates, however, this can be used on
        the client side to offer authentication requests to the server
        based on a certificate instead of a raw public key.
See:
https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.certkeys
Note: very little effort is made to validate the certificate contents,
that is for the server to decide if it is good enough to authenticate
successfully.
"""
if isinstance(value, Message):
constructor = "from_message"
elif os.path.isfile(value):
constructor = "from_file"
else:
constructor = "from_string"
blob = getattr(PublicBlob, constructor)(value)
if not blob.key_type.startswith(self.get_name()):
err = "PublicBlob type {} incompatible with key type {}"
raise ValueError(err.format(blob.key_type, self.get_name()))
self.public_blob = blob
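# A short, self-contained sketch (not used by the library itself) of the
# intended key-plus-certificate flow: load the private key, then attach its
# OpenSSH certificate so publickey auth can offer the cert. Paths are
# hypothetical.
def _example_load_key_with_certificate():
    from paramiko import Ed25519Key
    key = Ed25519Key.from_private_key_file("/home/user/.ssh/id_ed25519")
    key.load_certificate("/home/user/.ssh/id_ed25519-cert.pub")
    # key.public_blob now holds the certificate, which client-side auth code
    # can present to the server instead of the bare public key.
    return key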
# General construct for an OpenSSH style Public Key blob
# readable from a one-line file of the format:
# <key-name> <base64-blob> [<comment>]
# Of little value in the case of standard public keys
# {ssh-rsa, ssh-dss, ssh-ecdsa, ssh-ed25519}, but should
# provide rudimentary support for {*-cert.v01}
class PublicBlob:
"""
OpenSSH plain public key or OpenSSH signed public key (certificate).
Tries to be as dumb as possible and barely cares about specific
per-key-type data.
.. note::
Most of the time you'll want to call `from_file`, `from_string` or
`from_message` for useful instantiation, the main constructor is
basically "I should be using ``attrs`` for this."
"""
def __init__(self, type_, blob, comment=None):
"""
Create a new public blob of given type and contents.
:param str type_: Type indicator, eg ``ssh-rsa``.
:param bytes blob: The blob bytes themselves.
:param str comment: A comment, if one was given (e.g. file-based.)
"""
self.key_type = type_
self.key_blob = blob
self.comment = comment
@classmethod
def from_file(cls, filename):
"""
Create a public blob from a ``-cert.pub``-style file on disk.
"""
with open(filename) as f:
string = f.read()
return cls.from_string(string)
@classmethod
def from_string(cls, string):
"""
Create a public blob from a ``-cert.pub``-style string.
"""
fields = string.split(None, 2)
if len(fields) < 2:
msg = "Not enough fields for public blob: {}"
raise ValueError(msg.format(fields))
key_type = fields[0]
key_blob = decodebytes(b(fields[1]))
try:
comment = fields[2].strip()
except IndexError:
comment = None
# Verify that the blob message first (string) field matches the
# key_type
m = Message(key_blob)
blob_type = m.get_text()
if blob_type != key_type:
deets = "key type={!r}, but blob type={!r}".format(
key_type, blob_type
)
raise ValueError("Invalid PublicBlob contents: {}".format(deets))
# All good? All good.
return cls(type_=key_type, blob=key_blob, comment=comment)
@classmethod
def from_message(cls, message):
"""
Create a public blob from a network `.Message`.
Specifically, a cert-bearing pubkey auth packet, because by definition
        OpenSSH-style certificates 'are' their own network representation.
"""
type_ = message.get_text()
return cls(type_=type_, blob=message.asbytes())
def __str__(self):
ret = "{} public key/certificate".format(self.key_type)
if self.comment:
ret += "- {}".format(self.comment)
return ret
def __eq__(self, other):
# Just piggyback on Message/BytesIO, since both of these should be one.
return self and other and self.key_blob == other.key_blob
def __ne__(self, other):
return not self == other
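# Minimal sketch of building a PublicBlob from a one-line public key string;
# the RSA key is generated on the fly purely for illustration, and the
# comment field is a placeholder.
def _example_public_blob_from_string():
    from paramiko import RSAKey
    key = RSAKey.generate(2048)
    line = "{} {} demo@example".format(key.get_name(), key.get_base64())
    blob = PublicBlob.from_string(line)
    assert blob.key_type == "ssh-rsa" and blob.comment == "demo@example"
    return blob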


@ -0,0 +1,148 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Utility functions for dealing with primes.
"""
import os
from paramiko import util
from paramiko.common import byte_mask
from paramiko.ssh_exception import SSHException
def _roll_random(n):
"""returns a random # from 0 to N-1"""
bits = util.bit_length(n - 1)
byte_count = (bits + 7) // 8
hbyte_mask = pow(2, bits % 8) - 1
# so here's the plan:
# we fetch as many random bits as we'd need to fit N-1, and if the
# generated number is >= N, we try again. in the worst case (N-1 is a
# power of 2), we have slightly better than 50% odds of getting one that
# fits, so i can't guarantee that this loop will ever finish, but the odds
# of it looping forever should be infinitesimal.
while True:
x = os.urandom(byte_count)
if hbyte_mask > 0:
x = byte_mask(x[0], hbyte_mask) + x[1:]
num = util.inflate_long(x, 1)
if num < n:
break
return num
class ModulusPack:
"""
convenience object for holding the contents of the /etc/ssh/moduli file,
on systems that have such a file.
"""
def __init__(self):
# pack is a hash of: bits -> [ (generator, modulus) ... ]
self.pack = {}
self.discarded = []
def _parse_modulus(self, line):
(
timestamp,
mod_type,
tests,
tries,
size,
generator,
modulus,
) = line.split()
mod_type = int(mod_type)
tests = int(tests)
tries = int(tries)
size = int(size)
generator = int(generator)
modulus = int(modulus, 16)
# weed out primes that aren't at least:
# type 2 (meets basic structural requirements)
# test 4 (more than just a small-prime sieve)
# tries < 100 if test & 4 (at least 100 tries of miller-rabin)
if (
mod_type < 2
or tests < 4
or (tests & 4 and tests < 8 and tries < 100)
):
self.discarded.append(
(modulus, "does not meet basic requirements")
)
return
if generator == 0:
generator = 2
# there's a bug in the ssh "moduli" file (yeah, i know: shock! dismay!
# call cnn!) where it understates the bit lengths of these primes by 1.
# this is okay.
bl = util.bit_length(modulus)
if (bl != size) and (bl != size + 1):
self.discarded.append(
(modulus, "incorrectly reported bit length {}".format(size))
)
return
if bl not in self.pack:
self.pack[bl] = []
self.pack[bl].append((generator, modulus))
def read_file(self, filename):
"""
:raises IOError: passed from any file operations that fail.
"""
self.pack = {}
with open(filename, "r") as f:
for line in f:
line = line.strip()
if (len(line) == 0) or (line[0] == "#"):
continue
try:
self._parse_modulus(line)
except:
continue
def get_modulus(self, min, prefer, max):
bitsizes = sorted(self.pack.keys())
if len(bitsizes) == 0:
raise SSHException("no moduli available")
good = -1
# find nearest bitsize >= preferred
for b in bitsizes:
if (b >= prefer) and (b <= max) and (b < good or good == -1):
good = b
# if that failed, find greatest bitsize >= min
if good == -1:
for b in bitsizes:
if (b >= min) and (b <= max) and (b > good):
good = b
if good == -1:
# their entire (min, max) range has no intersection with our range.
# if their range is below ours, pick the smallest. otherwise pick
# the largest. it'll be out of their range requirement either way,
# but we'll be sending them the closest one we have.
good = bitsizes[0]
if min > good:
good = bitsizes[-1]
# now pick a random modulus of this bitsize
n = _roll_random(len(self.pack[good]))
return self.pack[good][n]
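# Illustrative sketch of how server-side group-exchange code might consume
# this class; the moduli path is the conventional OpenSSH location and may
# not exist on every system.
def _example_pick_group_exchange_modulus():
    pack = ModulusPack()
    pack.read_file("/etc/ssh/moduli")  # raises IOError if the file is absent
    # Ask for something near 2048 bits within a 1024..8192-bit window,
    # similar to the ranges used during SSH group exchange.
    generator, modulus = pack.get_modulus(1024, 2048, 8192)
    return generator, modulus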


@ -0,0 +1,134 @@
# Copyright (C) 2012 Yipit, Inc <coders@yipit.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import shlex
import signal
from select import select
import socket
import time
# Try-and-ignore import so platforms w/o subprocess (eg Google App Engine) can
# still import paramiko.
subprocess, subprocess_import_error = None, None
try:
import subprocess
except ImportError as e:
subprocess_import_error = e
from paramiko.ssh_exception import ProxyCommandFailure
from paramiko.util import ClosingContextManager
class ProxyCommand(ClosingContextManager):
"""
Wraps a subprocess running ProxyCommand-driven programs.
    This class implements the socket-like interface needed by the
    `.Transport` and `.Packetizer` classes. Using this class instead of a
    regular socket makes it possible to talk with a Popen'd command that will
    proxy traffic between the client and a server hosted on another machine.
Instances of this class may be used as context managers.
"""
def __init__(self, command_line):
"""
        Create a new ProxyCommand instance. The instance created by this
class can be passed as an argument to the `.Transport` class.
:param str command_line:
the command that should be executed and used as the proxy.
"""
if subprocess is None:
raise subprocess_import_error
self.cmd = shlex.split(command_line)
self.process = subprocess.Popen(
self.cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=0,
)
self.timeout = None
def send(self, content):
"""
Write the content received from the SSH client to the standard
input of the forked command.
        :param bytes content: the bytes to send to the forked command
"""
try:
self.process.stdin.write(content)
except IOError as e:
# There was a problem with the child process. It probably
# died and we can't proceed. The best option here is to
# raise an exception informing the user that the informed
# ProxyCommand is not working.
raise ProxyCommandFailure(" ".join(self.cmd), e.strerror)
return len(content)
def recv(self, size):
"""
Read from the standard output of the forked program.
        :param int size: how many bytes should be read
:return: the string of bytes read, which may be shorter than requested
"""
try:
buffer = b""
start = time.time()
while len(buffer) < size:
select_timeout = None
if self.timeout is not None:
elapsed = time.time() - start
if elapsed >= self.timeout:
raise socket.timeout()
select_timeout = self.timeout - elapsed
r, w, x = select([self.process.stdout], [], [], select_timeout)
if r and r[0] == self.process.stdout:
buffer += os.read(
self.process.stdout.fileno(), size - len(buffer)
)
return buffer
except socket.timeout:
if buffer:
# Don't raise socket.timeout, return partial result instead
return buffer
raise # socket.timeout is a subclass of IOError
except IOError as e:
raise ProxyCommandFailure(" ".join(self.cmd), e.strerror)
def close(self):
os.kill(self.process.pid, signal.SIGTERM)
@property
def closed(self):
return self.process.returncode is not None
@property
def _closed(self):
# Concession to Python 3 socket-like API
return self.closed
def settimeout(self, timeout):
self.timeout = timeout
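# Hedged usage sketch: a ProxyCommand instance is handed to Transport in
# place of a real socket. The jump-host command line below is hypothetical.
def _example_transport_via_proxy():
    import paramiko
    sock = ProxyCommand("ssh -W target.example.com:22 jumphost.example.com")
    return paramiko.Transport(sock)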


@ -0,0 +1,227 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
RSA keys.
"""
from cryptography.exceptions import InvalidSignature, UnsupportedAlgorithm
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa, padding
from paramiko.message import Message
from paramiko.pkey import PKey
from paramiko.ssh_exception import SSHException
class RSAKey(PKey):
"""
Representation of an RSA key which can be used to sign and verify SSH2
data.
"""
name = "ssh-rsa"
HASHES = {
"ssh-rsa": hashes.SHA1,
"ssh-rsa-cert-v01@openssh.com": hashes.SHA1,
"rsa-sha2-256": hashes.SHA256,
"rsa-sha2-256-cert-v01@openssh.com": hashes.SHA256,
"rsa-sha2-512": hashes.SHA512,
"rsa-sha2-512-cert-v01@openssh.com": hashes.SHA512,
}
def __init__(
self,
msg=None,
data=None,
filename=None,
password=None,
key=None,
file_obj=None,
):
self.key = None
self.public_blob = None
if file_obj is not None:
self._from_private_key(file_obj, password)
return
if filename is not None:
self._from_private_key_file(filename, password)
return
if (msg is None) and (data is not None):
msg = Message(data)
if key is not None:
self.key = key
else:
self._check_type_and_load_cert(
msg=msg,
# NOTE: this does NOT change when using rsa2 signatures; it's
# purely about key loading, not exchange or verification
key_type=self.name,
cert_type="ssh-rsa-cert-v01@openssh.com",
)
self.key = rsa.RSAPublicNumbers(
e=msg.get_mpint(), n=msg.get_mpint()
).public_key(default_backend())
@classmethod
def identifiers(cls):
return list(cls.HASHES.keys())
@property
def size(self):
return self.key.key_size
@property
def public_numbers(self):
if isinstance(self.key, rsa.RSAPrivateKey):
return self.key.private_numbers().public_numbers
else:
return self.key.public_numbers()
def asbytes(self):
m = Message()
m.add_string(self.name)
m.add_mpint(self.public_numbers.e)
m.add_mpint(self.public_numbers.n)
return m.asbytes()
def __str__(self):
# NOTE: see #853 to explain some legacy behavior.
# TODO 4.0: replace with a nice clean fingerprint display or something
return self.asbytes().decode("utf8", errors="ignore")
@property
def _fields(self):
return (self.get_name(), self.public_numbers.e, self.public_numbers.n)
def get_name(self):
return self.name
def get_bits(self):
return self.size
def can_sign(self):
return isinstance(self.key, rsa.RSAPrivateKey)
def sign_ssh_data(self, data, algorithm=None):
if algorithm is None:
algorithm = self.name
sig = self.key.sign(
data,
padding=padding.PKCS1v15(),
            # HASHES being just a map from long identifier to SHA1, SHA256 or
            # SHA512 - cert'ness is not truly relevant.
algorithm=self.HASHES[algorithm](),
)
m = Message()
# And here again, cert'ness is irrelevant, so it is stripped out.
m.add_string(algorithm.replace("-cert-v01@openssh.com", ""))
m.add_string(sig)
return m
def verify_ssh_sig(self, data, msg):
sig_algorithm = msg.get_text()
if sig_algorithm not in self.HASHES:
return False
key = self.key
if isinstance(key, rsa.RSAPrivateKey):
key = key.public_key()
# NOTE: pad received signature with leading zeros, key.verify()
# expects a signature of key size (e.g. PuTTY doesn't pad)
sign = msg.get_binary()
diff = key.key_size - len(sign) * 8
if diff > 0:
sign = b"\x00" * ((diff + 7) // 8) + sign
try:
key.verify(
sign, data, padding.PKCS1v15(), self.HASHES[sig_algorithm]()
)
except InvalidSignature:
return False
else:
return True
def write_private_key_file(self, filename, password=None):
self._write_private_key_file(
filename,
self.key,
serialization.PrivateFormat.TraditionalOpenSSL,
password=password,
)
def write_private_key(self, file_obj, password=None):
self._write_private_key(
file_obj,
self.key,
serialization.PrivateFormat.TraditionalOpenSSL,
password=password,
)
@staticmethod
def generate(bits, progress_func=None):
"""
Generate a new private RSA key. This factory function can be used to
generate a new host key or authentication key.
:param int bits: number of bits the generated key should be.
:param progress_func: Unused
:return: new `.RSAKey` private key
"""
key = rsa.generate_private_key(
public_exponent=65537, key_size=bits, backend=default_backend()
)
return RSAKey(key=key)
# ...internals...
def _from_private_key_file(self, filename, password):
data = self._read_private_key_file("RSA", filename, password)
self._decode_key(data)
def _from_private_key(self, file_obj, password):
data = self._read_private_key("RSA", file_obj, password)
self._decode_key(data)
def _decode_key(self, data):
pkformat, data = data
if pkformat == self._PRIVATE_KEY_FORMAT_ORIGINAL:
try:
key = serialization.load_der_private_key(
data, password=None, backend=default_backend()
)
except (ValueError, TypeError, UnsupportedAlgorithm) as e:
raise SSHException(str(e))
elif pkformat == self._PRIVATE_KEY_FORMAT_OPENSSH:
n, e, d, iqmp, p, q = self._uint32_cstruct_unpack(data, "iiiiii")
public_numbers = rsa.RSAPublicNumbers(e=e, n=n)
key = rsa.RSAPrivateNumbers(
p=p,
q=q,
d=d,
dmp1=d % (p - 1),
dmq1=d % (q - 1),
iqmp=iqmp,
public_numbers=public_numbers,
).private_key(default_backend())
else:
self._got_bad_key_format_id(pkformat)
assert isinstance(key, rsa.RSAPrivateKey)
self.key = key
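# Self-contained sketch of the sign/verify round trip implemented above; a
# throwaway key is generated so the example needs no on-disk fixtures.
def _example_rsa_sign_and_verify(payload=b"example payload"):
    private = RSAKey.generate(2048)
    sig_msg = private.sign_ssh_data(payload, algorithm="rsa-sha2-256")
    # A verifier typically holds only the public half, reconstructed here
    # from the wire-format blob.
    public = RSAKey(data=private.asbytes())
    sig_msg.rewind()  # reset the read pointer before verification
    return public.verify_ssh_sig(payload, sig_msg)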


@ -0,0 +1,732 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
`.ServerInterface` is an interface to override for server support.
"""
import threading
from paramiko import util
from paramiko.common import (
DEBUG,
ERROR,
OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED,
AUTH_FAILED,
AUTH_SUCCESSFUL,
)
class ServerInterface:
"""
This class defines an interface for controlling the behavior of Paramiko
in server mode.
Methods on this class are called from Paramiko's primary thread, so you
shouldn't do too much work in them. (Certainly nothing that blocks or
sleeps.)
"""
def check_channel_request(self, kind, chanid):
"""
Determine if a channel request of a given type will be granted, and
return ``OPEN_SUCCEEDED`` or an error code. This method is
called in server mode when the client requests a channel, after
authentication is complete.
If you allow channel requests (and an ssh server that didn't would be
useless), you should also override some of the channel request methods
below, which are used to determine which services will be allowed on
a given channel:
- `check_channel_pty_request`
- `check_channel_shell_request`
- `check_channel_subsystem_request`
- `check_channel_window_change_request`
- `check_channel_x11_request`
- `check_channel_forward_agent_request`
The ``chanid`` parameter is a small number that uniquely identifies the
channel within a `.Transport`. A `.Channel` object is not created
unless this method returns ``OPEN_SUCCEEDED`` -- once a
`.Channel` object is created, you can call `.Channel.get_id` to
retrieve the channel ID.
The return value should either be ``OPEN_SUCCEEDED`` (or
``0``) to allow the channel request, or one of the following error
codes to reject it:
- ``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``
- ``OPEN_FAILED_CONNECT_FAILED``
- ``OPEN_FAILED_UNKNOWN_CHANNEL_TYPE``
- ``OPEN_FAILED_RESOURCE_SHORTAGE``
The default implementation always returns
``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``.
:param str kind:
the kind of channel the client would like to open (usually
``"session"``).
:param int chanid: ID of the channel
:return: an `int` success or failure code (listed above)
"""
return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
def get_allowed_auths(self, username):
"""
Return a list of authentication methods supported by the server.
This list is sent to clients attempting to authenticate, to inform them
of authentication methods that might be successful.
The "list" is actually a string of comma-separated names of types of
authentication. Possible values are ``"password"``, ``"publickey"``,
and ``"none"``.
The default implementation always returns ``"password"``.
:param str username: the username requesting authentication.
:return: a comma-separated `str` of authentication types
"""
return "password"
def check_auth_none(self, username):
"""
Determine if a client may open channels with no (further)
authentication.
Return ``AUTH_FAILED`` if the client must authenticate, or
``AUTH_SUCCESSFUL`` if it's okay for the client to not
authenticate.
The default implementation always returns ``AUTH_FAILED``.
:param str username: the username of the client.
:return:
``AUTH_FAILED`` if the authentication fails; ``AUTH_SUCCESSFUL`` if
it succeeds.
:rtype: int
"""
return AUTH_FAILED
def check_auth_password(self, username, password):
"""
Determine if a given username and password supplied by the client is
acceptable for use in authentication.
Return ``AUTH_FAILED`` if the password is not accepted,
``AUTH_SUCCESSFUL`` if the password is accepted and completes
the authentication, or ``AUTH_PARTIALLY_SUCCESSFUL`` if your
        authentication is stateful, and this password is accepted for
authentication, but more authentication is required. (In this latter
case, `get_allowed_auths` will be called to report to the client what
options it has for continuing the authentication.)
The default implementation always returns ``AUTH_FAILED``.
:param str username: the username of the authenticating client.
:param str password: the password given by the client.
:return:
``AUTH_FAILED`` if the authentication fails; ``AUTH_SUCCESSFUL`` if
it succeeds; ``AUTH_PARTIALLY_SUCCESSFUL`` if the password auth is
successful, but authentication must continue.
:rtype: int
"""
return AUTH_FAILED
def check_auth_publickey(self, username, key):
"""
Determine if a given key supplied by the client is acceptable for use
in authentication. You should override this method in server mode to
check the username and key and decide if you would accept a signature
made using this key.
Return ``AUTH_FAILED`` if the key is not accepted,
``AUTH_SUCCESSFUL`` if the key is accepted and completes the
authentication, or ``AUTH_PARTIALLY_SUCCESSFUL`` if your
        authentication is stateful, and this key is accepted for
authentication, but more authentication is required. (In this latter
case, `get_allowed_auths` will be called to report to the client what
options it has for continuing the authentication.)
        Note that you don't have to actually verify any key signature here.
If you're willing to accept the key, Paramiko will do the work of
verifying the client's signature.
The default implementation always returns ``AUTH_FAILED``.
:param str username: the username of the authenticating client
:param .PKey key: the key object provided by the client
:return:
``AUTH_FAILED`` if the client can't authenticate with this key;
``AUTH_SUCCESSFUL`` if it can; ``AUTH_PARTIALLY_SUCCESSFUL`` if it
can authenticate with this key but must continue with
authentication
:rtype: int
"""
return AUTH_FAILED
def check_auth_interactive(self, username, submethods):
"""
Begin an interactive authentication challenge, if supported. You
should override this method in server mode if you want to support the
``"keyboard-interactive"`` auth type, which requires you to send a
series of questions for the client to answer.
Return ``AUTH_FAILED`` if this auth method isn't supported. Otherwise,
you should return an `.InteractiveQuery` object containing the prompts
and instructions for the user. The response will be sent via a call
to `check_auth_interactive_response`.
The default implementation always returns ``AUTH_FAILED``.
:param str username: the username of the authenticating client
:param str submethods:
a comma-separated list of methods preferred by the client (usually
empty)
:return:
``AUTH_FAILED`` if this auth method isn't supported; otherwise an
object containing queries for the user
:rtype: int or `.InteractiveQuery`
"""
return AUTH_FAILED
def check_auth_interactive_response(self, responses):
"""
Continue or finish an interactive authentication challenge, if
supported. You should override this method in server mode if you want
to support the ``"keyboard-interactive"`` auth type.
Return ``AUTH_FAILED`` if the responses are not accepted,
``AUTH_SUCCESSFUL`` if the responses are accepted and complete
the authentication, or ``AUTH_PARTIALLY_SUCCESSFUL`` if your
authentication is stateful, and this set of responses is accepted for
authentication, but more authentication is required. (In this latter
case, `get_allowed_auths` will be called to report to the client what
options it has for continuing the authentication.)
If you wish to continue interactive authentication with more questions,
you may return an `.InteractiveQuery` object, which should cause the
client to respond with more answers, calling this method again. This
cycle can continue indefinitely.
The default implementation always returns ``AUTH_FAILED``.
:param responses: list of `str` responses from the client
:return:
``AUTH_FAILED`` if the authentication fails; ``AUTH_SUCCESSFUL`` if
it succeeds; ``AUTH_PARTIALLY_SUCCESSFUL`` if the interactive auth
is successful, but authentication must continue; otherwise an
object containing queries for the user
:rtype: int or `.InteractiveQuery`
"""
return AUTH_FAILED
def check_auth_gssapi_with_mic(
self, username, gss_authenticated=AUTH_FAILED, cc_file=None
):
"""
        Authenticate the given user to the server if they are a valid krb5
principal.
:param str username: The username of the authenticating client
:param int gss_authenticated: The result of the krb5 authentication
        :param str cc_file: The krb5 client credentials cache filename
:return: ``AUTH_FAILED`` if the user is not authenticated otherwise
``AUTH_SUCCESSFUL``
:rtype: int
:note: Kerberos credential delegation is not supported.
:see: `.ssh_gss`
        :note: We are just checking in `.AuthHandler` that the given user is
            a valid krb5 principal! We don't check if the krb5 principal is
            allowed to log in on the server, because there is no way to do
            that in Python. So if you develop your own SSH server with
            paramiko for a certain platform like Linux, you should call
            ``krb5_kuserok()`` in your local kerberos library to make sure
            that the krb5 principal has an account on the server and is
            allowed to log in as a user.
:see: http://www.unix.com/man-page/all/3/krb5_kuserok/
"""
if gss_authenticated == AUTH_SUCCESSFUL:
return AUTH_SUCCESSFUL
return AUTH_FAILED
def check_auth_gssapi_keyex(
self, username, gss_authenticated=AUTH_FAILED, cc_file=None
):
"""
        Authenticate the given user to the server if they are a valid krb5
principal and GSS-API Key Exchange was performed.
If GSS-API Key Exchange was not performed, this authentication method
won't be available.
:param str username: The username of the authenticating client
:param int gss_authenticated: The result of the krb5 authentication
        :param str cc_file: The krb5 client credentials cache filename
:return: ``AUTH_FAILED`` if the user is not authenticated otherwise
``AUTH_SUCCESSFUL``
:rtype: int
:note: Kerberos credential delegation is not supported.
:see: `.ssh_gss` `.kex_gss`
        :note: We are just checking in `.AuthHandler` that the given user is
            a valid krb5 principal! We don't check if the krb5 principal is
            allowed to log in on the server, because there is no way to do
            that in Python. So if you develop your own SSH server with
            paramiko for a certain platform like Linux, you should call
            ``krb5_kuserok()`` in your local kerberos library to make sure
            that the krb5 principal has an account on the server and is
            allowed to log in as a user.
:see: http://www.unix.com/man-page/all/3/krb5_kuserok/
"""
if gss_authenticated == AUTH_SUCCESSFUL:
return AUTH_SUCCESSFUL
return AUTH_FAILED
def enable_auth_gssapi(self):
"""
Overwrite this function in your SSH server to enable GSSAPI
authentication.
        The default implementation always returns ``False``.
:returns bool: Whether GSSAPI authentication is enabled.
:see: `.ssh_gss`
"""
UseGSSAPI = False
return UseGSSAPI
def check_port_forward_request(self, address, port):
"""
Handle a request for port forwarding. The client is asking that
connections to the given address and port be forwarded back across
this ssh connection. An address of ``"0.0.0.0"`` indicates a global
address (any address associated with this server) and a port of ``0``
indicates that no specific port is requested (usually the OS will pick
a port).
The default implementation always returns ``False``, rejecting the
port forwarding request. If the request is accepted, you should return
the port opened for listening.
:param str address: the requested address
:param int port: the requested port
:return:
the port number (`int`) that was opened for listening, or ``False``
to reject
"""
return False
def cancel_port_forward_request(self, address, port):
"""
The client would like to cancel a previous port-forwarding request.
If the given address and port is being forwarded across this ssh
connection, the port should be closed.
:param str address: the forwarded address
:param int port: the forwarded port
"""
pass
def check_global_request(self, kind, msg):
"""
Handle a global request of the given ``kind``. This method is called
in server mode and client mode, whenever the remote host makes a global
request. If there are any arguments to the request, they will be in
``msg``.
There aren't any useful global requests defined, aside from port
forwarding, so usually this type of request is an extension to the
protocol.
If the request was successful and you would like to return contextual
data to the remote host, return a tuple. Items in the tuple will be
sent back with the successful result. (Note that the items in the
tuple can only be strings, ints, or bools.)
The default implementation always returns ``False``, indicating that it
does not support any global requests.
.. note:: Port forwarding requests are handled separately, in
`check_port_forward_request`.
:param str kind: the kind of global request being made.
:param .Message msg: any extra arguments to the request.
:return:
``True`` or a `tuple` of data if the request was granted; ``False``
otherwise.
"""
return False
# ...Channel requests...
def check_channel_pty_request(
self, channel, term, width, height, pixelwidth, pixelheight, modes
):
"""
Determine if a pseudo-terminal of the given dimensions (usually
requested for shell access) can be provided on the given channel.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the pty request arrived on.
:param str term: type of terminal requested (for example, ``"vt100"``).
:param int width: width of screen in characters.
:param int height: height of screen in characters.
:param int pixelwidth:
width of screen in pixels, if known (may be ``0`` if unknown).
:param int pixelheight:
height of screen in pixels, if known (may be ``0`` if unknown).
:return:
``True`` if the pseudo-terminal has been allocated; ``False``
otherwise.
"""
return False
def check_channel_shell_request(self, channel):
"""
Determine if a shell will be provided to the client on the given
channel. If this method returns ``True``, the channel should be
connected to the stdin/stdout of a shell (or something that acts like
a shell).
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the request arrived on.
:return:
``True`` if this channel is now hooked up to a shell; ``False`` if
a shell can't or won't be provided.
"""
return False
def check_channel_exec_request(self, channel, command):
"""
Determine if a shell command will be executed for the client. If this
method returns ``True``, the channel should be connected to the stdin,
stdout, and stderr of the shell command.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the request arrived on.
:param str command: the command to execute.
:return:
``True`` if this channel is now hooked up to the stdin, stdout, and
stderr of the executing command; ``False`` if the command will not
be executed.
.. versionadded:: 1.1
"""
return False
def check_channel_subsystem_request(self, channel, name):
"""
Determine if a requested subsystem will be provided to the client on
the given channel. If this method returns ``True``, all future I/O
through this channel will be assumed to be connected to the requested
subsystem. An example of a subsystem is ``sftp``.
The default implementation checks for a subsystem handler assigned via
`.Transport.set_subsystem_handler`.
If one has been set, the handler is invoked and this method returns
``True``. Otherwise it returns ``False``.
.. note:: Because the default implementation uses the `.Transport` to
identify valid subsystems, you probably won't need to override this
method.
:param .Channel channel: the `.Channel` the pty request arrived on.
:param str name: name of the requested subsystem.
:return:
``True`` if this channel is now hooked up to the requested
subsystem; ``False`` if that subsystem can't or won't be provided.
"""
transport = channel.get_transport()
handler_class, args, kwargs = transport._get_subsystem_handler(name)
if handler_class is None:
return False
handler = handler_class(channel, name, self, *args, **kwargs)
handler.start()
return True
def check_channel_window_change_request(
self, channel, width, height, pixelwidth, pixelheight
):
"""
Determine if the pseudo-terminal on the given channel can be resized.
This only makes sense if a pty was previously allocated on it.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the pty request arrived on.
:param int width: width of screen in characters.
:param int height: height of screen in characters.
:param int pixelwidth:
width of screen in pixels, if known (may be ``0`` if unknown).
:param int pixelheight:
height of screen in pixels, if known (may be ``0`` if unknown).
:return: ``True`` if the terminal was resized; ``False`` if not.
"""
return False
def check_channel_x11_request(
self,
channel,
single_connection,
auth_protocol,
auth_cookie,
screen_number,
):
"""
Determine if the client will be provided with an X11 session. If this
method returns ``True``, X11 applications should be routed through new
SSH channels, using `.Transport.open_x11_channel`.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the X11 request arrived on
:param bool single_connection:
``True`` if only a single X11 channel should be opened, else
``False``.
:param str auth_protocol: the protocol used for X11 authentication
:param str auth_cookie: the cookie used to authenticate to X11
:param int screen_number: the number of the X11 screen to connect to
:return: ``True`` if the X11 session was opened; ``False`` if not
"""
return False
def check_channel_forward_agent_request(self, channel):
"""
        Determine if the client will be provided with a forward agent session.
If this method returns ``True``, the server will allow SSH Agent
forwarding.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the request arrived on
:return: ``True`` if the AgentForward was loaded; ``False`` if not
If ``True`` is returned, the server should create an
:class:`AgentServerProxy` to access the agent.
"""
return False
def check_channel_direct_tcpip_request(self, chanid, origin, destination):
"""
Determine if a local port forwarding channel will be granted, and
return ``OPEN_SUCCEEDED`` or an error code. This method is
called in server mode when the client requests a channel, after
authentication is complete.
The ``chanid`` parameter is a small number that uniquely identifies the
channel within a `.Transport`. A `.Channel` object is not created
unless this method returns ``OPEN_SUCCEEDED`` -- once a
`.Channel` object is created, you can call `.Channel.get_id` to
retrieve the channel ID.
The origin and destination parameters are (ip_address, port) tuples
that correspond to both ends of the TCP connection in the forwarding
tunnel.
The return value should either be ``OPEN_SUCCEEDED`` (or
``0``) to allow the channel request, or one of the following error
codes to reject it:
- ``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``
- ``OPEN_FAILED_CONNECT_FAILED``
- ``OPEN_FAILED_UNKNOWN_CHANNEL_TYPE``
- ``OPEN_FAILED_RESOURCE_SHORTAGE``
The default implementation always returns
``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``.
:param int chanid: ID of the channel
:param tuple origin:
2-tuple containing the IP address and port of the originator
(client side)
:param tuple destination:
2-tuple containing the IP address and port of the destination
(server side)
:return: an `int` success or failure code (listed above)
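A minimal sketch of a subclass that permits forwarding only to one
(purely illustrative) destination might look like::

    class ForwardOnlyServer(ServerInterface):
        def check_channel_direct_tcpip_request(
            self, chanid, origin, destination
        ):
            # allow forwarding to a single host/port; reject everything else
            if destination == ("127.0.0.1", 8080):
                return OPEN_SUCCEEDED
            return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED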
"""
return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
def check_channel_env_request(self, channel, name, value):
"""
Check whether a given environment variable can be specified for the
given channel. This method should return ``True`` if the server
is willing to set the specified environment variable. Note that
some environment variables (e.g., PATH) can be exceedingly
dangerous, so blindly allowing the client to set the environment
is almost certainly not a good idea.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the env request arrived on
:param str name: name of the environment variable to be set
:param str value: value of the environment variable
:returns: ``True`` if the variable may be set; ``False`` if not
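A conservative sketch (the allow-list below is purely illustrative)
might look like::

    class EnvAllowingServer(ServerInterface):
        SAFE_VARS = {"LANG", "LC_ALL", "TZ"}

        def check_channel_env_request(self, channel, name, value):
            # depending on the Paramiko version, ``name`` may arrive as
            # bytes; decode or adjust the comparison as needed
            return name in self.SAFE_VARS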
"""
return False
def get_banner(self):
"""
A pre-login banner to display to the user. The message may span
multiple lines separated by CRLF pairs. The language should be in
RFC 3066 style, for example: ``en-US``.
The default implementation always returns ``(None, None)``.
:returns: A tuple containing the banner and language code.
.. versionadded:: 2.3
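For example, a subclass might simply return a static banner::

    def get_banner(self):
        return ("Authorized users only\r\n", "en-US")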
"""
return (None, None)
class InteractiveQuery:
"""
A query (set of prompts) for a user during interactive authentication.
"""
def __init__(self, name="", instructions="", *prompts):
"""
Create a new interactive query to send to the client. The name and
instructions are optional, but are generally displayed to the end
user. A list of prompts may be included, or they may be added via
the `add_prompt` method.
:param str name: name of this query
:param str instructions:
user instructions (usually short) about this query
:param str prompts: one or more authentication prompts
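For example, a password-style query could be built as::

    query = InteractiveQuery("login", "Please authenticate")
    query.add_prompt("Password: ", echo=False)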
"""
self.name = name
self.instructions = instructions
self.prompts = []
for x in prompts:
if isinstance(x, str):
self.add_prompt(x)
else:
self.add_prompt(x[0], x[1])
def add_prompt(self, prompt, echo=True):
"""
Add a prompt to this query. The prompt should be a (reasonably short)
string. Multiple prompts can be added to the same query.
:param str prompt: the user prompt
:param bool echo:
``True`` (default) if the user's response should be echoed;
``False`` if not (for a password or similar)
"""
self.prompts.append((prompt, echo))
class SubsystemHandler(threading.Thread):
"""
Handler for a subsystem in server mode. If you create a subclass of this
class and pass it to `.Transport.set_subsystem_handler`, an object of this
class will be created for each request for this subsystem. Each new object
will be executed within its own new thread by calling `start_subsystem`.
When that method completes, the channel is closed.
For example, if you made a subclass ``MP3Handler`` and registered it as the
handler for subsystem ``"mp3"``, then whenever a client has successfully
authenticated and requests subsystem ``"mp3"``, an object of class
``MP3Handler`` will be created, and `start_subsystem` will be called on
it from a new thread.
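A minimal sketch of such a handler (the subsystem name, behavior, and the
``server_transport`` variable are illustrative only) might look like::

    class EchoHandler(SubsystemHandler):
        def start_subsystem(self, name, transport, channel):
            # echo back whatever the client sends until EOF
            while True:
                data = channel.recv(1024)
                if not data:
                    break
                channel.send(data)

    server_transport.set_subsystem_handler("echo", EchoHandler)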
"""
def __init__(self, channel, name, server):
"""
Create a new handler for a channel. This is used by `.ServerInterface`
to start up a new handler when a channel requests this subsystem. You
don't need to override this method, but if you do, be sure to pass the
``channel`` and ``name`` parameters through to the original
``__init__`` method here.
:param .Channel channel: the channel associated with this
subsystem request.
:param str name: name of the requested subsystem.
:param .ServerInterface server:
the server object for the session that started this subsystem
"""
threading.Thread.__init__(self, target=self._run)
self.__channel = channel
self.__transport = channel.get_transport()
self.__name = name
self.__server = server
def get_server(self):
"""
Return the `.ServerInterface` object associated with this channel and
subsystem.
"""
return self.__server
def _run(self):
try:
self.__transport._log(
DEBUG, "Starting handler for subsystem {}".format(self.__name)
)
self.start_subsystem(self.__name, self.__transport, self.__channel)
except Exception as e:
self.__transport._log(
ERROR,
'Exception in subsystem handler for "{}": {}'.format(
self.__name, e
),
)
self.__transport._log(ERROR, util.tb_strings())
try:
self.finish_subsystem()
except:
pass
def start_subsystem(self, name, transport, channel):
"""
Process an ssh subsystem in server mode. This method is called on a
new object (and in a new thread) for each subsystem request. It is
assumed that all subsystem logic will take place here, and when the
subsystem is finished, this method will return. After this method
returns, the channel is closed.
The combination of ``transport`` and ``channel`` are unique; this
handler corresponds to exactly one `.Channel` on one `.Transport`.
.. note::
It is the responsibility of this method to exit if the underlying
`.Transport` is closed. This can be done by checking
`.Transport.is_active` or noticing an EOF on the `.Channel`. If
this method loops forever without checking for this case, your
Python interpreter may refuse to exit because this thread will
still be running.
:param str name: name of the requested subsystem.
:param .Transport transport: the server-mode `.Transport`.
:param .Channel channel: the channel associated with this subsystem
request.
"""
pass
def finish_subsystem(self):
"""
Perform any cleanup at the end of a subsystem. The default
implementation just closes the channel.
.. versionadded:: 1.1
"""
self.__channel.close()

View File

@ -0,0 +1,224 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import select
import socket
import struct
from paramiko import util
from paramiko.common import DEBUG, byte_chr, byte_ord
from paramiko.message import Message
(
CMD_INIT,
CMD_VERSION,
CMD_OPEN,
CMD_CLOSE,
CMD_READ,
CMD_WRITE,
CMD_LSTAT,
CMD_FSTAT,
CMD_SETSTAT,
CMD_FSETSTAT,
CMD_OPENDIR,
CMD_READDIR,
CMD_REMOVE,
CMD_MKDIR,
CMD_RMDIR,
CMD_REALPATH,
CMD_STAT,
CMD_RENAME,
CMD_READLINK,
CMD_SYMLINK,
) = range(1, 21)
(CMD_STATUS, CMD_HANDLE, CMD_DATA, CMD_NAME, CMD_ATTRS) = range(101, 106)
(CMD_EXTENDED, CMD_EXTENDED_REPLY) = range(200, 202)
SFTP_OK = 0
(
SFTP_EOF,
SFTP_NO_SUCH_FILE,
SFTP_PERMISSION_DENIED,
SFTP_FAILURE,
SFTP_BAD_MESSAGE,
SFTP_NO_CONNECTION,
SFTP_CONNECTION_LOST,
SFTP_OP_UNSUPPORTED,
) = range(1, 9)
SFTP_DESC = [
"Success",
"End of file",
"No such file",
"Permission denied",
"Failure",
"Bad message",
"No connection",
"Connection lost",
"Operation unsupported",
]
SFTP_FLAG_READ = 0x1
SFTP_FLAG_WRITE = 0x2
SFTP_FLAG_APPEND = 0x4
SFTP_FLAG_CREATE = 0x8
SFTP_FLAG_TRUNC = 0x10
SFTP_FLAG_EXCL = 0x20
_VERSION = 3
# for debugging
CMD_NAMES = {
CMD_INIT: "init",
CMD_VERSION: "version",
CMD_OPEN: "open",
CMD_CLOSE: "close",
CMD_READ: "read",
CMD_WRITE: "write",
CMD_LSTAT: "lstat",
CMD_FSTAT: "fstat",
CMD_SETSTAT: "setstat",
CMD_FSETSTAT: "fsetstat",
CMD_OPENDIR: "opendir",
CMD_READDIR: "readdir",
CMD_REMOVE: "remove",
CMD_MKDIR: "mkdir",
CMD_RMDIR: "rmdir",
CMD_REALPATH: "realpath",
CMD_STAT: "stat",
CMD_RENAME: "rename",
CMD_READLINK: "readlink",
CMD_SYMLINK: "symlink",
CMD_STATUS: "status",
CMD_HANDLE: "handle",
CMD_DATA: "data",
CMD_NAME: "name",
CMD_ATTRS: "attrs",
CMD_EXTENDED: "extended",
CMD_EXTENDED_REPLY: "extended_reply",
}
# TODO: rewrite SFTP file/server modules' overly-flexible "make a request with
# xyz components" so we don't need this very silly method of signaling whether
# a given Python integer should be 32- or 64-bit.
# NOTE: this only became an issue when dropping Python 2 support; prior to
# doing so, we had to support actual-longs, which served as that signal. This
# is simply recreating that structure in a more tightly scoped fashion.
class int64(int):
pass
class SFTPError(Exception):
pass
class BaseSFTP:
def __init__(self):
self.logger = util.get_logger("paramiko.sftp")
self.sock = None
self.ultra_debug = False
# ...internals...
def _send_version(self):
m = Message()
m.add_int(_VERSION)
self._send_packet(CMD_INIT, m)
t, data = self._read_packet()
if t != CMD_VERSION:
raise SFTPError("Incompatible sftp protocol")
version = struct.unpack(">I", data[:4])[0]
# if version != _VERSION:
# raise SFTPError('Incompatible sftp protocol')
return version
def _send_server_version(self):
# winscp will freak out if the server sends version info before the
# client finishes sending INIT.
t, data = self._read_packet()
if t != CMD_INIT:
raise SFTPError("Incompatible sftp protocol")
version = struct.unpack(">I", data[:4])[0]
# advertise that we support "check-file"
extension_pairs = ["check-file", "md5,sha1"]
msg = Message()
msg.add_int(_VERSION)
msg.add(*extension_pairs)
self._send_packet(CMD_VERSION, msg)
return version
def _log(self, level, msg, *args):
self.logger.log(level, msg, *args)
def _write_all(self, out):
while len(out) > 0:
n = self.sock.send(out)
if n <= 0:
raise EOFError()
if n == len(out):
return
out = out[n:]
return
def _read_all(self, n):
out = bytes()
while n > 0:
if isinstance(self.sock, socket.socket):
# sometimes sftp is used directly over a socket instead of
# through a paramiko channel. in this case, check periodically
# if the socket is closed. (for some reason, recv() won't ever
# return or raise an exception, but calling select on a closed
# socket will.)
while True:
read, write, err = select.select([self.sock], [], [], 0.1)
if len(read) > 0:
x = self.sock.recv(n)
break
else:
x = self.sock.recv(n)
if len(x) == 0:
raise EOFError()
out += x
n -= len(x)
return out
def _send_packet(self, t, packet):
packet = packet.asbytes()
out = struct.pack(">I", len(packet) + 1) + byte_chr(t) + packet
if self.ultra_debug:
self._log(DEBUG, util.format_binary(out, "OUT: "))
self._write_all(out)
def _read_packet(self):
x = self._read_all(4)
# most sftp servers won't accept packets larger than about 32k, so
# anything with the high byte set (> 16MB) is just garbage.
if byte_ord(x[0]):
raise SFTPError("Garbage packet received")
size = struct.unpack(">I", x)[0]
data = self._read_all(size)
if self.ultra_debug:
self._log(DEBUG, util.format_binary(data, "IN: "))
if size > 0:
t = byte_ord(data[0])
return t, data[1:]
return 0, bytes()

View File

@ -0,0 +1,239 @@
# Copyright (C) 2003-2006 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import stat
import time
from paramiko.common import x80000000, o700, o70, xffffffff
class SFTPAttributes:
"""
Representation of the attributes of a file (or proxied file) for SFTP in
client or server mode. It attempts to mirror the object returned by
`os.stat` as closely as possible, so it may have the following fields,
with the same meanings as those returned by an `os.stat` object:
- ``st_size``
- ``st_uid``
- ``st_gid``
- ``st_mode``
- ``st_atime``
- ``st_mtime``
Because SFTP allows files to carry other arbitrary named (extended) attributes, these
are stored in a dict named ``attr``. Occasionally, the filename is also
stored, in ``filename``.
"""
FLAG_SIZE = 1
FLAG_UIDGID = 2
FLAG_PERMISSIONS = 4
FLAG_AMTIME = 8
FLAG_EXTENDED = x80000000
def __init__(self):
"""
Create a new (empty) SFTPAttributes object. All fields will be empty.
"""
self._flags = 0
self.st_size = None
self.st_uid = None
self.st_gid = None
self.st_mode = None
self.st_atime = None
self.st_mtime = None
self.attr = {}
@classmethod
def from_stat(cls, obj, filename=None):
"""
Create an `.SFTPAttributes` object from an existing ``stat`` object (an
object returned by `os.stat`).
:param object obj: an object returned by `os.stat` (or equivalent).
:param str filename: the filename associated with this file.
:return: new `.SFTPAttributes` object with the same attribute fields.
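For example (the filename is illustrative)::

    import os
    attrs = SFTPAttributes.from_stat(os.stat("example.txt"), "example.txt")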
"""
attr = cls()
attr.st_size = obj.st_size
attr.st_uid = obj.st_uid
attr.st_gid = obj.st_gid
attr.st_mode = obj.st_mode
attr.st_atime = obj.st_atime
attr.st_mtime = obj.st_mtime
if filename is not None:
attr.filename = filename
return attr
def __repr__(self):
return "<SFTPAttributes: {}>".format(self._debug_str())
# ...internals...
@classmethod
def _from_msg(cls, msg, filename=None, longname=None):
attr = cls()
attr._unpack(msg)
if filename is not None:
attr.filename = filename
if longname is not None:
attr.longname = longname
return attr
def _unpack(self, msg):
self._flags = msg.get_int()
if self._flags & self.FLAG_SIZE:
self.st_size = msg.get_int64()
if self._flags & self.FLAG_UIDGID:
self.st_uid = msg.get_int()
self.st_gid = msg.get_int()
if self._flags & self.FLAG_PERMISSIONS:
self.st_mode = msg.get_int()
if self._flags & self.FLAG_AMTIME:
self.st_atime = msg.get_int()
self.st_mtime = msg.get_int()
if self._flags & self.FLAG_EXTENDED:
count = msg.get_int()
for i in range(count):
self.attr[msg.get_string()] = msg.get_string()
def _pack(self, msg):
self._flags = 0
if self.st_size is not None:
self._flags |= self.FLAG_SIZE
if (self.st_uid is not None) and (self.st_gid is not None):
self._flags |= self.FLAG_UIDGID
if self.st_mode is not None:
self._flags |= self.FLAG_PERMISSIONS
if (self.st_atime is not None) and (self.st_mtime is not None):
self._flags |= self.FLAG_AMTIME
if len(self.attr) > 0:
self._flags |= self.FLAG_EXTENDED
msg.add_int(self._flags)
if self._flags & self.FLAG_SIZE:
msg.add_int64(self.st_size)
if self._flags & self.FLAG_UIDGID:
msg.add_int(self.st_uid)
msg.add_int(self.st_gid)
if self._flags & self.FLAG_PERMISSIONS:
msg.add_int(self.st_mode)
if self._flags & self.FLAG_AMTIME:
# throw away any fractional seconds
msg.add_int(int(self.st_atime))
msg.add_int(int(self.st_mtime))
if self._flags & self.FLAG_EXTENDED:
msg.add_int(len(self.attr))
for key, val in self.attr.items():
msg.add_string(key)
msg.add_string(val)
return
def _debug_str(self):
out = "[ "
if self.st_size is not None:
out += "size={} ".format(self.st_size)
if (self.st_uid is not None) and (self.st_gid is not None):
out += "uid={} gid={} ".format(self.st_uid, self.st_gid)
if self.st_mode is not None:
out += "mode=" + oct(self.st_mode) + " "
if (self.st_atime is not None) and (self.st_mtime is not None):
out += "atime={} mtime={} ".format(self.st_atime, self.st_mtime)
for k, v in self.attr.items():
out += '"{}"={!r} '.format(str(k), v)
out += "]"
return out
@staticmethod
def _rwx(n, suid, sticky=False):
if suid:
suid = 2
out = "-r"[n >> 2] + "-w"[(n >> 1) & 1]
if sticky:
out += "-xTt"[suid + (n & 1)]
else:
out += "-xSs"[suid + (n & 1)]
return out
def __str__(self):
"""create a unix-style long description of the file (like ls -l)"""
if self.st_mode is not None:
kind = stat.S_IFMT(self.st_mode)
if kind == stat.S_IFIFO:
ks = "p"
elif kind == stat.S_IFCHR:
ks = "c"
elif kind == stat.S_IFDIR:
ks = "d"
elif kind == stat.S_IFBLK:
ks = "b"
elif kind == stat.S_IFREG:
ks = "-"
elif kind == stat.S_IFLNK:
ks = "l"
elif kind == stat.S_IFSOCK:
ks = "s"
else:
ks = "?"
ks += self._rwx(
(self.st_mode & o700) >> 6, self.st_mode & stat.S_ISUID
)
ks += self._rwx(
(self.st_mode & o70) >> 3, self.st_mode & stat.S_ISGID
)
ks += self._rwx(
self.st_mode & 7, self.st_mode & stat.S_ISVTX, True
)
else:
ks = "?---------"
# compute display date
if (self.st_mtime is None) or (self.st_mtime == xffffffff):
# shouldn't really happen
datestr = "(unknown date)"
else:
time_tuple = time.localtime(self.st_mtime)
if abs(time.time() - self.st_mtime) > 15_552_000:
# (15,552,000s = 6 months)
datestr = time.strftime("%d %b %Y", time_tuple)
else:
datestr = time.strftime("%d %b %H:%M", time_tuple)
filename = getattr(self, "filename", "?")
# not all servers support uid/gid
uid = self.st_uid
gid = self.st_gid
size = self.st_size
if uid is None:
uid = 0
if gid is None:
gid = 0
if size is None:
size = 0
# TODO: not sure this actually worked as expected beforehand, leaving
# it untouched for the time being, re: .format() upgrade, until someone
# has time to doublecheck
return "%s 1 %-8d %-8d %8d %-12s %s" % (
ks,
uid,
gid,
size,
datestr,
filename,
)
def asbytes(self):
return str(self).encode()

View File

@ -0,0 +1,965 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of Paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from binascii import hexlify
import errno
import os
import stat
import threading
import time
import weakref
from paramiko import util
from paramiko.channel import Channel
from paramiko.message import Message
from paramiko.common import INFO, DEBUG, o777
from paramiko.sftp import (
BaseSFTP,
CMD_OPENDIR,
CMD_HANDLE,
SFTPError,
CMD_READDIR,
CMD_NAME,
CMD_CLOSE,
SFTP_FLAG_READ,
SFTP_FLAG_WRITE,
SFTP_FLAG_CREATE,
SFTP_FLAG_TRUNC,
SFTP_FLAG_APPEND,
SFTP_FLAG_EXCL,
CMD_OPEN,
CMD_REMOVE,
CMD_RENAME,
CMD_MKDIR,
CMD_RMDIR,
CMD_STAT,
CMD_ATTRS,
CMD_LSTAT,
CMD_SYMLINK,
CMD_SETSTAT,
CMD_READLINK,
CMD_REALPATH,
CMD_STATUS,
CMD_EXTENDED,
SFTP_OK,
SFTP_EOF,
SFTP_NO_SUCH_FILE,
SFTP_PERMISSION_DENIED,
int64,
)
from paramiko.sftp_attr import SFTPAttributes
from paramiko.ssh_exception import SSHException
from paramiko.sftp_file import SFTPFile
from paramiko.util import ClosingContextManager, b, u
def _to_unicode(s):
"""
decode a string as ascii or utf8 if possible (as required by the sftp
protocol). if neither works, just return a byte string because the server
probably doesn't know the filename's encoding.
"""
try:
return s.encode("ascii")
except (UnicodeError, AttributeError):
try:
return s.decode("utf-8")
except UnicodeError:
return s
b_slash = b"/"
class SFTPClient(BaseSFTP, ClosingContextManager):
"""
SFTP client object.
Used to open an SFTP session across an open SSH `.Transport` and perform
remote file operations.
Instances of this class may be used as context managers.
"""
def __init__(self, sock):
"""
Create an SFTP client from an existing `.Channel`. The channel
should already have requested the ``"sftp"`` subsystem.
An alternate way to create an SFTP client context is by using
`from_transport`.
:param .Channel sock: an open `.Channel` using the ``"sftp"`` subsystem
:raises:
`.SSHException` -- if there's an exception while negotiating sftp
"""
BaseSFTP.__init__(self)
self.sock = sock
self.ultra_debug = False
self.request_number = 1
# lock for request_number
self._lock = threading.Lock()
self._cwd = None
# request # -> SFTPFile
self._expecting = weakref.WeakValueDictionary()
if type(sock) is Channel:
# override default logger
transport = self.sock.get_transport()
self.logger = util.get_logger(
transport.get_log_channel() + ".sftp"
)
self.ultra_debug = transport.get_hexdump()
try:
server_version = self._send_version()
except EOFError:
raise SSHException("EOF during negotiation")
self._log(
INFO,
"Opened sftp connection (server version {})".format(
server_version
),
)
@classmethod
def from_transport(cls, t, window_size=None, max_packet_size=None):
"""
Create an SFTP client channel from an open `.Transport`.
Setting the window and packet sizes might affect the transfer speed.
The default settings in the `.Transport` class are the same as in
OpenSSH and should work adequately for both file transfers and
interactive sessions.
:param .Transport t: an open `.Transport` which is already
authenticated
:param int window_size:
optional window size for the `.SFTPClient` session.
:param int max_packet_size:
optional max packet size for the `.SFTPClient` session.
:return:
a new `.SFTPClient` object, referring to an sftp session (channel)
across the transport
.. versionchanged:: 1.15
Added the ``window_size`` and ``max_packet_size`` arguments.
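Typical usage, assuming ``transport`` is an already-authenticated
`.Transport`, might look like::

    sftp = SFTPClient.from_transport(transport)
    print(sftp.listdir("."))
    sftp.close()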
"""
chan = t.open_session(
window_size=window_size, max_packet_size=max_packet_size
)
if chan is None:
return None
chan.invoke_subsystem("sftp")
return cls(chan)
def _log(self, level, msg, *args):
if isinstance(msg, list):
for m in msg:
self._log(level, m, *args)
else:
# NOTE: these bits MUST continue using %-style format junk because
# logging.Logger.log() explicitly requires it. Grump.
# escape '%' in msg (they could come from file or directory names)
# before logging
msg = msg.replace("%", "%%")
super()._log(
level,
"[chan %s] " + msg,
*([self.sock.get_name()] + list(args))
)
def close(self):
"""
Close the SFTP session and its underlying channel.
.. versionadded:: 1.4
"""
self._log(INFO, "sftp session closed.")
self.sock.close()
def get_channel(self):
"""
Return the underlying `.Channel` object for this SFTP session. This
might be useful for doing things like setting a timeout on the channel.
.. versionadded:: 1.7.1
"""
return self.sock
def listdir(self, path="."):
"""
Return a list containing the names of the entries in the given
``path``.
The list is in arbitrary order. It does not include the special
entries ``'.'`` and ``'..'`` even if they are present in the folder.
This method is meant to mirror ``os.listdir`` as closely as possible.
For a list of full `.SFTPAttributes` objects, see `listdir_attr`.
:param str path: path to list (defaults to ``'.'``)
"""
return [f.filename for f in self.listdir_attr(path)]
def listdir_attr(self, path="."):
"""
Return a list containing `.SFTPAttributes` objects corresponding to
files in the given ``path``. The list is in arbitrary order. It does
not include the special entries ``'.'`` and ``'..'`` even if they are
present in the folder.
The returned `.SFTPAttributes` objects will each have an additional
field: ``longname``, which may contain a formatted string of the file's
attributes, in unix format. The content of this string will probably
depend on the SFTP server implementation.
:param str path: path to list (defaults to ``'.'``)
:return: list of `.SFTPAttributes` objects
.. versionadded:: 1.2
"""
path = self._adjust_cwd(path)
self._log(DEBUG, "listdir({!r})".format(path))
t, msg = self._request(CMD_OPENDIR, path)
if t != CMD_HANDLE:
raise SFTPError("Expected handle")
handle = msg.get_binary()
filelist = []
while True:
try:
t, msg = self._request(CMD_READDIR, handle)
except EOFError:
# done with handle
break
if t != CMD_NAME:
raise SFTPError("Expected name response")
count = msg.get_int()
for i in range(count):
filename = msg.get_text()
longname = msg.get_text()
attr = SFTPAttributes._from_msg(msg, filename, longname)
if (filename != ".") and (filename != ".."):
filelist.append(attr)
self._request(CMD_CLOSE, handle)
return filelist
def listdir_iter(self, path=".", read_aheads=50):
"""
Generator version of `.listdir_attr`.
See the API docs for `.listdir_attr` for overall details.
This function adds one more kwarg on top of `.listdir_attr`:
``read_aheads``, an integer controlling how many
``SSH_FXP_READDIR`` requests are made to the server. The default of 50
should suffice for most file listings as each request/response cycle
may contain multiple files (dependent on server implementation).
.. versionadded:: 1.15
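For example (the path is illustrative)::

    for entry in sftp.listdir_iter("/var/log"):
        print(entry.filename)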
"""
path = self._adjust_cwd(path)
self._log(DEBUG, "listdir({!r})".format(path))
t, msg = self._request(CMD_OPENDIR, path)
if t != CMD_HANDLE:
raise SFTPError("Expected handle")
handle = msg.get_string()
nums = list()
while True:
try:
# Send out a bunch of readdir requests so that we can read the
# responses later on Section 6.7 of the SSH file transfer RFC
# explains this
# http://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt
for i in range(read_aheads):
num = self._async_request(type(None), CMD_READDIR, handle)
nums.append(num)
# For each of our sent requests
# Read and parse the corresponding packets
# If we're at the end of our queued requests, then fire off
# some more requests
# Exit the loop when we've reached the end of the directory
# handle
for num in nums:
t, pkt_data = self._read_packet()
msg = Message(pkt_data)
new_num = msg.get_int()
if num == new_num:
if t == CMD_STATUS:
self._convert_status(msg)
count = msg.get_int()
for i in range(count):
filename = msg.get_text()
longname = msg.get_text()
attr = SFTPAttributes._from_msg(
msg, filename, longname
)
if (filename != ".") and (filename != ".."):
yield attr
# If we've hit the end of our queued requests, reset nums.
nums = list()
except EOFError:
self._request(CMD_CLOSE, handle)
return
def open(self, filename, mode="r", bufsize=-1):
"""
Open a file on the remote server. The arguments are the same as for
Python's built-in `python:file` (aka `python:open`). A file-like
object is returned, which closely mimics the behavior of a normal
Python file object, including the ability to be used as a context
manager.
The mode indicates how the file is to be opened: ``'r'`` for reading,
``'w'`` for writing (truncating an existing file), ``'a'`` for
appending, ``'r+'`` for reading/writing, ``'w+'`` for reading/writing
(truncating an existing file), ``'a+'`` for reading/appending. The
Python ``'b'`` flag is ignored, since SSH treats all files as binary.
The ``'U'`` flag is supported in a compatible way.
Since 1.5.2, an ``'x'`` flag indicates that the operation should only
succeed if the file was created and did not previously exist. This has
no direct mapping to Python's file flags, but is commonly known as the
``O_EXCL`` flag in posix.
The file will be buffered in standard Python style by default, but
can be altered with the ``bufsize`` parameter. ``<=0`` turns off
buffering, ``1`` uses line buffering, and any number greater than 1
(``>1``) uses that specific buffer size.
:param str filename: name of the file to open
:param str mode: mode (Python-style) to open in
:param int bufsize: desired buffering (default: ``-1``)
:return: an `.SFTPFile` object representing the open file
:raises: ``IOError`` -- if the file could not be opened.
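For example (the remote path is illustrative)::

    with sftp.open("/tmp/data.bin", "rb") as f:
        contents = f.read()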
"""
filename = self._adjust_cwd(filename)
self._log(DEBUG, "open({!r}, {!r})".format(filename, mode))
imode = 0
if ("r" in mode) or ("+" in mode):
imode |= SFTP_FLAG_READ
if ("w" in mode) or ("+" in mode) or ("a" in mode):
imode |= SFTP_FLAG_WRITE
if "w" in mode:
imode |= SFTP_FLAG_CREATE | SFTP_FLAG_TRUNC
if "a" in mode:
imode |= SFTP_FLAG_CREATE | SFTP_FLAG_APPEND
if "x" in mode:
imode |= SFTP_FLAG_CREATE | SFTP_FLAG_EXCL
attrblock = SFTPAttributes()
t, msg = self._request(CMD_OPEN, filename, imode, attrblock)
if t != CMD_HANDLE:
raise SFTPError("Expected handle")
handle = msg.get_binary()
self._log(
DEBUG,
"open({!r}, {!r}) -> {}".format(
filename, mode, u(hexlify(handle))
),
)
return SFTPFile(self, handle, mode, bufsize)
# Python continues to vacillate about "open" vs "file"...
file = open
def remove(self, path):
"""
Remove the file at the given path. This only works on files; for
removing folders (directories), use `rmdir`.
:param str path: path (absolute or relative) of the file to remove
:raises: ``IOError`` -- if the path refers to a folder (directory)
"""
path = self._adjust_cwd(path)
self._log(DEBUG, "remove({!r})".format(path))
self._request(CMD_REMOVE, path)
unlink = remove
def rename(self, oldpath, newpath):
"""
Rename a file or folder from ``oldpath`` to ``newpath``.
.. note::
This method implements 'standard' SFTP ``RENAME`` behavior; those
seeking the OpenSSH "POSIX rename" extension behavior should use
`posix_rename`.
:param str oldpath:
existing name of the file or folder
:param str newpath:
new name for the file or folder, must not exist already
:raises:
``IOError`` -- if ``newpath`` is a folder, or something else goes
wrong
"""
oldpath = self._adjust_cwd(oldpath)
newpath = self._adjust_cwd(newpath)
self._log(DEBUG, "rename({!r}, {!r})".format(oldpath, newpath))
self._request(CMD_RENAME, oldpath, newpath)
def posix_rename(self, oldpath, newpath):
"""
Rename a file or folder from ``oldpath`` to ``newpath``, following
posix conventions.
:param str oldpath: existing name of the file or folder
:param str newpath: new name for the file or folder, will be
overwritten if it already exists
:raises:
``IOError`` -- if ``newpath`` is a folder, posix-rename is not
supported by the server or something else goes wrong
.. versionadded:: 2.2
"""
oldpath = self._adjust_cwd(oldpath)
newpath = self._adjust_cwd(newpath)
self._log(DEBUG, "posix_rename({!r}, {!r})".format(oldpath, newpath))
self._request(
CMD_EXTENDED, "posix-rename@openssh.com", oldpath, newpath
)
def mkdir(self, path, mode=o777):
"""
Create a folder (directory) named ``path`` with numeric mode ``mode``.
The default mode is 0777 (octal). On some systems, mode is ignored.
Where it is used, the current umask value is first masked out.
:param str path: name of the folder to create
:param int mode: permissions (posix-style) for the newly-created folder
"""
path = self._adjust_cwd(path)
self._log(DEBUG, "mkdir({!r}, {!r})".format(path, mode))
attr = SFTPAttributes()
attr.st_mode = mode
self._request(CMD_MKDIR, path, attr)
def rmdir(self, path):
"""
Remove the folder named ``path``.
:param str path: name of the folder to remove
"""
path = self._adjust_cwd(path)
self._log(DEBUG, "rmdir({!r})".format(path))
self._request(CMD_RMDIR, path)
def stat(self, path):
"""
Retrieve information about a file on the remote system. The return
value is an object whose attributes correspond to the attributes of
Python's ``stat`` structure as returned by ``os.stat``, except that it
contains fewer fields. An SFTP server may return as much or as little
info as it wants, so the results may vary from server to server.
Unlike a Python `python:stat` object, the result may not be accessed as
a tuple. This is mostly due to the author's slack factor.
The fields supported are: ``st_mode``, ``st_size``, ``st_uid``,
``st_gid``, ``st_atime``, and ``st_mtime``.
:param str path: the filename to stat
:return:
an `.SFTPAttributes` object containing attributes about the given
file
"""
path = self._adjust_cwd(path)
self._log(DEBUG, "stat({!r})".format(path))
t, msg = self._request(CMD_STAT, path)
if t != CMD_ATTRS:
raise SFTPError("Expected attributes")
return SFTPAttributes._from_msg(msg)
def lstat(self, path):
"""
Retrieve information about a file on the remote system, without
following symbolic links (shortcuts). This otherwise behaves exactly
the same as `stat`.
:param str path: the filename to stat
:return:
an `.SFTPAttributes` object containing attributes about the given
file
"""
path = self._adjust_cwd(path)
self._log(DEBUG, "lstat({!r})".format(path))
t, msg = self._request(CMD_LSTAT, path)
if t != CMD_ATTRS:
raise SFTPError("Expected attributes")
return SFTPAttributes._from_msg(msg)
def symlink(self, source, dest):
"""
Create a symbolic link to the ``source`` path at ``dest``.
:param str source: path of the original file
:param str dest: path of the newly created symlink
"""
dest = self._adjust_cwd(dest)
self._log(DEBUG, "symlink({!r}, {!r})".format(source, dest))
source = b(source)
self._request(CMD_SYMLINK, source, dest)
def chmod(self, path, mode):
"""
Change the mode (permissions) of a file. The permissions are
unix-style and identical to those used by Python's `os.chmod`
function.
:param str path: path of the file to change the permissions of
:param int mode: new permissions
"""
path = self._adjust_cwd(path)
self._log(DEBUG, "chmod({!r}, {!r})".format(path, mode))
attr = SFTPAttributes()
attr.st_mode = mode
self._request(CMD_SETSTAT, path, attr)
def chown(self, path, uid, gid):
"""
Change the owner (``uid``) and group (``gid``) of a file. As with
Python's `os.chown` function, you must pass both arguments, so if you
only want to change one, use `stat` first to retrieve the current
owner and group.
:param str path: path of the file to change the owner and group of
:param int uid: new owner's uid
:param int gid: new group id
"""
path = self._adjust_cwd(path)
self._log(DEBUG, "chown({!r}, {!r}, {!r})".format(path, uid, gid))
attr = SFTPAttributes()
attr.st_uid, attr.st_gid = uid, gid
self._request(CMD_SETSTAT, path, attr)
def utime(self, path, times):
"""
Set the access and modified times of the file specified by ``path``.
If ``times`` is ``None``, then the file's access and modified times
are set to the current time. Otherwise, ``times`` must be a 2-tuple
of numbers, of the form ``(atime, mtime)``, which is used to set the
access and modified times, respectively. This bizarre API is mimicked
from Python for the sake of consistency -- I apologize.
:param str path: path of the file to modify
:param tuple times:
``None`` or a tuple of (access time, modified time) in standard
internet epoch time (seconds since 01 January 1970 GMT)
"""
path = self._adjust_cwd(path)
if times is None:
times = (time.time(), time.time())
self._log(DEBUG, "utime({!r}, {!r})".format(path, times))
attr = SFTPAttributes()
attr.st_atime, attr.st_mtime = times
self._request(CMD_SETSTAT, path, attr)
def truncate(self, path, size):
"""
Change the size of the file specified by ``path``. This usually
extends or shrinks the size of the file, just like the `~file.truncate`
method on Python file objects.
:param str path: path of the file to modify
:param int size: the new size of the file
"""
path = self._adjust_cwd(path)
self._log(DEBUG, "truncate({!r}, {!r})".format(path, size))
attr = SFTPAttributes()
attr.st_size = size
self._request(CMD_SETSTAT, path, attr)
def readlink(self, path):
"""
Return the target of a symbolic link (shortcut). You can use
`symlink` to create these. The result may be either an absolute or
relative pathname.
:param str path: path of the symbolic link file
:return: target path, as a `str`
"""
path = self._adjust_cwd(path)
self._log(DEBUG, "readlink({!r})".format(path))
t, msg = self._request(CMD_READLINK, path)
if t != CMD_NAME:
raise SFTPError("Expected name response")
count = msg.get_int()
if count == 0:
return None
if count != 1:
raise SFTPError("Readlink returned {} results".format(count))
return _to_unicode(msg.get_string())
def normalize(self, path):
"""
Return the normalized path (on the server) of a given path. This
can be used to quickly resolve symbolic links or determine what the
server is considering to be the "current folder" (by passing ``'.'``
as ``path``).
:param str path: path to be normalized
:return: normalized form of the given path (as a `str`)
:raises: ``IOError`` -- if the path can't be resolved on the server
"""
path = self._adjust_cwd(path)
self._log(DEBUG, "normalize({!r})".format(path))
t, msg = self._request(CMD_REALPATH, path)
if t != CMD_NAME:
raise SFTPError("Expected name response")
count = msg.get_int()
if count != 1:
raise SFTPError("Realpath returned {} results".format(count))
return msg.get_text()
def chdir(self, path=None):
"""
Change the "current directory" of this SFTP session. Since SFTP
doesn't really have the concept of a current working directory, this is
emulated by Paramiko. Once you use this method to set a working
directory, all operations on this `.SFTPClient` object will be relative
to that path. You can pass in ``None`` to stop using a current working
directory.
:param str path: new current working directory
:raises:
``IOError`` -- if the requested path doesn't exist on the server
.. versionadded:: 1.4
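For example (the path is illustrative)::

    sftp.chdir("/var/log")
    sftp.listdir()  # now relative to /var/log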
"""
if path is None:
self._cwd = None
return
if not stat.S_ISDIR(self.stat(path).st_mode):
code = errno.ENOTDIR
raise SFTPError(code, "{}: {}".format(os.strerror(code), path))
self._cwd = b(self.normalize(path))
def getcwd(self):
"""
Return the "current working directory" for this SFTP session, as
emulated by Paramiko. If no directory has been set with `chdir`,
this method will return ``None``.
.. versionadded:: 1.4
"""
# TODO: make class initialize with self._cwd set to self.normalize('.')
return self._cwd and u(self._cwd)
def _transfer_with_callback(self, reader, writer, file_size, callback):
size = 0
while True:
data = reader.read(32768)
writer.write(data)
size += len(data)
if len(data) == 0:
break
if callback is not None:
callback(size, file_size)
return size
def putfo(self, fl, remotepath, file_size=0, callback=None, confirm=True):
"""
Copy the contents of an open file object (``fl``) to the SFTP server as
``remotepath``. Any exception raised by operations will be passed
through.
The SFTP operations use pipelining for speed.
:param fl: opened file or file-like object to copy
:param str remotepath: the destination path on the SFTP server
:param int file_size:
optional size parameter passed to callback. If none is specified,
size defaults to 0
:param callable callback:
optional callback function (form: ``func(int, int)``) that accepts
the bytes transferred so far and the total bytes to be transferred
(since 1.7.4)
:param bool confirm:
whether to do a stat() on the file afterwards to confirm the file
size (since 1.7.7)
:return:
an `.SFTPAttributes` object containing attributes about the given
file.
.. versionadded:: 1.10
"""
with self.file(remotepath, "wb") as fr:
fr.set_pipelined(True)
size = self._transfer_with_callback(
reader=fl, writer=fr, file_size=file_size, callback=callback
)
if confirm:
s = self.stat(remotepath)
if s.st_size != size:
raise IOError(
"size mismatch in put! {} != {}".format(s.st_size, size)
)
else:
s = SFTPAttributes()
return s
def put(self, localpath, remotepath, callback=None, confirm=True):
"""
Copy a local file (``localpath``) to the SFTP server as ``remotepath``.
Any exception raised by operations will be passed through. This
method is primarily provided as a convenience.
The SFTP operations use pipelining for speed.
:param str localpath: the local file to copy
:param str remotepath: the destination path on the SFTP server. Note
that the filename should be included. Only specifying a directory
may result in an error.
:param callable callback:
optional callback function (form: ``func(int, int)``) that accepts
the bytes transferred so far and the total bytes to be transferred
:param bool confirm:
whether to do a stat() on the file afterwards to confirm the file
size
:return: an `.SFTPAttributes` object containing attributes about the
given file
.. versionadded:: 1.4
.. versionchanged:: 1.7.4
``callback`` and rich attribute return value added.
.. versionchanged:: 1.7.7
``confirm`` param added.
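For example (paths and the callback are illustrative)::

    def progress(transferred, total):
        print("{} of {} bytes".format(transferred, total))

    sftp.put("report.pdf", "/home/user/report.pdf", callback=progress)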
"""
file_size = os.stat(localpath).st_size
with open(localpath, "rb") as fl:
return self.putfo(fl, remotepath, file_size, callback, confirm)
def getfo(
self,
remotepath,
fl,
callback=None,
prefetch=True,
max_concurrent_prefetch_requests=None,
):
"""
Copy a remote file (``remotepath``) from the SFTP server and write to
an open file or file-like object, ``fl``. Any exception raised by
operations will be passed through. This method is primarily provided
as a convenience.
:param str remotepath: the remote file path to copy from
:param fl:
    an opened file or file-like object that the remote file's contents
    will be written to
:param callable callback:
optional callback function (form: ``func(int, int)``) that accepts
the bytes transferred so far and the total bytes to be transferred
:param bool prefetch:
controls whether prefetching is performed (default: True)
:param int max_concurrent_prefetch_requests:
The maximum number of concurrent read requests to prefetch. See
`.SFTPClient.get` (its ``max_concurrent_prefetch_requests`` param)
for details.
:return: the `number <int>` of bytes written to the opened file object
.. versionadded:: 1.10
.. versionchanged:: 2.8
Added the ``prefetch`` keyword argument.
.. versionchanged:: 3.3
Added ``max_concurrent_prefetch_requests``.
"""
file_size = self.stat(remotepath).st_size
with self.open(remotepath, "rb") as fr:
if prefetch:
fr.prefetch(file_size, max_concurrent_prefetch_requests)
return self._transfer_with_callback(
reader=fr, writer=fl, file_size=file_size, callback=callback
)
def get(
self,
remotepath,
localpath,
callback=None,
prefetch=True,
max_concurrent_prefetch_requests=None,
):
"""
Copy a remote file (``remotepath``) from the SFTP server to the local
host as ``localpath``. Any exception raised by operations will be
passed through. This method is primarily provided as a convenience.
:param str remotepath: the remote file to copy
:param str localpath: the destination path on the local host
:param callable callback:
optional callback function (form: ``func(int, int)``) that accepts
the bytes transferred so far and the total bytes to be transferred
:param bool prefetch:
controls whether prefetching is performed (default: True)
:param int max_concurrent_prefetch_requests:
The maximum number of concurrent read requests to prefetch.
When this is ``None`` (the default), do not limit the number of
concurrent prefetch requests. Note: OpenSSH's sftp internally
imposes a limit of 64 concurrent requests, while Paramiko imposes
no limit by default; consider setting a limit if a file can be
successfully received with sftp but hangs with Paramiko.
.. versionadded:: 1.4
.. versionchanged:: 1.7.4
Added the ``callback`` param
.. versionchanged:: 2.8
Added the ``prefetch`` keyword argument.
.. versionchanged:: 3.3
Added ``max_concurrent_prefetch_requests``.
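For example (paths are illustrative)::

    sftp.get("/var/log/syslog", "syslog.local")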
"""
with open(localpath, "wb") as fl:
size = self.getfo(
remotepath,
fl,
callback,
prefetch,
max_concurrent_prefetch_requests,
)
s = os.stat(localpath)
if s.st_size != size:
raise IOError(
"size mismatch in get! {} != {}".format(s.st_size, size)
)
# ...internals...
def _request(self, t, *args):
num = self._async_request(type(None), t, *args)
return self._read_response(num)
def _async_request(self, fileobj, t, *args):
# this method may be called from other threads (prefetch)
self._lock.acquire()
try:
msg = Message()
msg.add_int(self.request_number)
for item in args:
if isinstance(item, int64):
msg.add_int64(item)
elif isinstance(item, int):
msg.add_int(item)
elif isinstance(item, SFTPAttributes):
item._pack(msg)
else:
# For all other types, rely on as_string() to either coerce
# to bytes before writing or raise a suitable exception.
msg.add_string(item)
num = self.request_number
self._expecting[num] = fileobj
self.request_number += 1
finally:
self._lock.release()
self._send_packet(t, msg)
return num
def _read_response(self, waitfor=None):
while True:
try:
t, data = self._read_packet()
except EOFError as e:
raise SSHException("Server connection dropped: {}".format(e))
msg = Message(data)
num = msg.get_int()
self._lock.acquire()
try:
if num not in self._expecting:
# might be response for a file that was closed before
# responses came back
self._log(DEBUG, "Unexpected response #{}".format(num))
if waitfor is None:
# just doing a single check
break
continue
fileobj = self._expecting[num]
del self._expecting[num]
finally:
self._lock.release()
if num == waitfor:
# synchronous
if t == CMD_STATUS:
self._convert_status(msg)
return t, msg
# cannot rewrite this to satisfy E721, neither as a None check
# nor as an isinstance check against NoneType
if fileobj is not type(None): # noqa
fileobj._async_response(t, msg, num)
if waitfor is None:
# just doing a single check
break
return None, None
def _finish_responses(self, fileobj):
while fileobj in self._expecting.values():
self._read_response()
fileobj._check_exception()
def _convert_status(self, msg):
"""
Raises EOFError or IOError on error status; otherwise does nothing.
"""
code = msg.get_int()
text = msg.get_text()
if code == SFTP_OK:
return
elif code == SFTP_EOF:
raise EOFError(text)
elif code == SFTP_NO_SUCH_FILE:
# clever idea from john a. meinel: map the error codes to errno
raise IOError(errno.ENOENT, text)
elif code == SFTP_PERMISSION_DENIED:
raise IOError(errno.EACCES, text)
else:
raise IOError(text)
def _adjust_cwd(self, path):
"""
Return an adjusted path if we're emulating a "current working
directory" for the server.
"""
path = b(path)
if self._cwd is None:
return path
if len(path) and path[0:1] == b_slash:
# absolute path
return path
if self._cwd == b_slash:
return self._cwd + path
return self._cwd + b_slash + path
class SFTP(SFTPClient):
"""
An alias for `.SFTPClient` for backwards compatibility.
"""
pass

View File

@ -0,0 +1,594 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
SFTP file object
"""
from binascii import hexlify
from collections import deque
import socket
import threading
import time
from paramiko.common import DEBUG, io_sleep
from paramiko.file import BufferedFile
from paramiko.util import u
from paramiko.sftp import (
CMD_CLOSE,
CMD_READ,
CMD_DATA,
SFTPError,
CMD_WRITE,
CMD_STATUS,
CMD_FSTAT,
CMD_ATTRS,
CMD_FSETSTAT,
CMD_EXTENDED,
int64,
)
from paramiko.sftp_attr import SFTPAttributes
class SFTPFile(BufferedFile):
"""
Proxy object for a file on the remote server, in client mode SFTP.
Instances of this class may be used as context managers in the same way
that built-in Python file objects are.
"""
# Some sftp servers will choke if you send read/write requests larger than
# this size.
MAX_REQUEST_SIZE = 32768
def __init__(self, sftp, handle, mode="r", bufsize=-1):
BufferedFile.__init__(self)
self.sftp = sftp
self.handle = handle
BufferedFile._set_mode(self, mode, bufsize)
self.pipelined = False
self._prefetching = False
self._prefetch_done = False
self._prefetch_data = {}
self._prefetch_extents = {}
self._prefetch_lock = threading.Lock()
self._saved_exception = None
self._reqs = deque()
def __del__(self):
self._close(async_=True)
def close(self):
"""
Close the file.
"""
self._close(async_=False)
def _close(self, async_=False):
# We allow double-close without signaling an error, because real
# Python file objects do. However, we must protect against actually
# sending multiple CMD_CLOSE packets, because after we close our
# handle, the same handle may be re-allocated by the server, and we
# may end up mysteriously closing some random other file. (This is
# especially important because we unconditionally call close() from
# __del__.)
if self._closed:
return
self.sftp._log(DEBUG, "close({})".format(u(hexlify(self.handle))))
if self.pipelined:
self.sftp._finish_responses(self)
BufferedFile.close(self)
try:
if async_:
# GC'd file handle could be called from an arbitrary thread
# -- don't wait for a response
self.sftp._async_request(type(None), CMD_CLOSE, self.handle)
else:
self.sftp._request(CMD_CLOSE, self.handle)
except EOFError:
# may have outlived the Transport connection
pass
except (IOError, socket.error):
# may have outlived the Transport connection
pass
def _data_in_prefetch_requests(self, offset, size):
k = [
x for x in list(self._prefetch_extents.values()) if x[0] <= offset
]
if len(k) == 0:
return False
k.sort(key=lambda x: x[0])
buf_offset, buf_size = k[-1]
if buf_offset + buf_size <= offset:
# prefetch request ends before this one begins
return False
if buf_offset + buf_size >= offset + size:
# inclusive
return True
# well, we have part of the request. see if another chunk has
# the rest.
return self._data_in_prefetch_requests(
buf_offset + buf_size, offset + size - buf_offset - buf_size
)
def _data_in_prefetch_buffers(self, offset):
"""
if a block of data is present in the prefetch buffers, at the given
offset, return the offset of the relevant prefetch buffer. otherwise,
return None. this guarantees nothing about the number of bytes
collected in the prefetch buffer so far.
"""
k = [i for i in self._prefetch_data.keys() if i <= offset]
if len(k) == 0:
return None
index = max(k)
buf_offset = offset - index
if buf_offset >= len(self._prefetch_data[index]):
# it's not here
return None
return index
def _read_prefetch(self, size):
"""
read data out of the prefetch buffer, if possible. if the data isn't
in the buffer, return None. otherwise, behaves like a normal read.
"""
# while not closed, and haven't fetched past the current position,
# and haven't reached EOF...
while True:
offset = self._data_in_prefetch_buffers(self._realpos)
if offset is not None:
break
if self._prefetch_done or self._closed:
break
self.sftp._read_response()
self._check_exception()
if offset is None:
self._prefetching = False
return None
prefetch = self._prefetch_data[offset]
del self._prefetch_data[offset]
buf_offset = self._realpos - offset
if buf_offset > 0:
self._prefetch_data[offset] = prefetch[:buf_offset]
prefetch = prefetch[buf_offset:]
if size < len(prefetch):
self._prefetch_data[self._realpos + size] = prefetch[size:]
prefetch = prefetch[:size]
return prefetch
def _read(self, size):
size = min(size, self.MAX_REQUEST_SIZE)
if self._prefetching:
data = self._read_prefetch(size)
if data is not None:
return data
t, msg = self.sftp._request(
CMD_READ, self.handle, int64(self._realpos), int(size)
)
if t != CMD_DATA:
raise SFTPError("Expected data")
return msg.get_string()
def _write(self, data):
# may write less than requested if it would exceed max packet size
chunk = min(len(data), self.MAX_REQUEST_SIZE)
sftp_async_request = self.sftp._async_request(
type(None),
CMD_WRITE,
self.handle,
int64(self._realpos),
data[:chunk],
)
self._reqs.append(sftp_async_request)
if not self.pipelined or (
len(self._reqs) > 100 and self.sftp.sock.recv_ready()
):
while len(self._reqs):
req = self._reqs.popleft()
t, msg = self.sftp._read_response(req)
if t != CMD_STATUS:
raise SFTPError("Expected status")
# convert_status already called
return chunk
def settimeout(self, timeout):
"""
Set a timeout on read/write operations on the underlying socket or
ssh `.Channel`.
:param float timeout:
seconds to wait for a pending read/write operation before raising
``socket.timeout``, or ``None`` for no timeout
.. seealso:: `.Channel.settimeout`
"""
self.sftp.sock.settimeout(timeout)
def gettimeout(self):
"""
Returns the timeout in seconds (as a `float`) associated with the
socket or ssh `.Channel` used for this file.
.. seealso:: `.Channel.gettimeout`
"""
return self.sftp.sock.gettimeout()
def setblocking(self, blocking):
"""
Set blocking or non-blocking mode on the underlying socket or ssh
`.Channel`.
:param int blocking:
0 to set non-blocking mode; non-0 to set blocking mode.
.. seealso:: `.Channel.setblocking`
"""
self.sftp.sock.setblocking(blocking)
def seekable(self):
"""
Check if the file supports random access.
:return:
`True` if the file supports random access. If `False`,
:meth:`seek` will raise an exception
"""
return True
def seek(self, offset, whence=0):
"""
Set the file's current position.
See `file.seek` for details.
"""
self.flush()
if whence == self.SEEK_SET:
self._realpos = self._pos = offset
elif whence == self.SEEK_CUR:
self._pos += offset
self._realpos = self._pos
else:
self._realpos = self._pos = self._get_size() + offset
self._rbuffer = bytes()
def stat(self):
"""
Retrieve information about this file from the remote system. This is
exactly like `.SFTPClient.stat`, except that it operates on an
already-open file.
:returns:
an `.SFTPAttributes` object containing attributes about this file.
"""
t, msg = self.sftp._request(CMD_FSTAT, self.handle)
if t != CMD_ATTRS:
raise SFTPError("Expected attributes")
return SFTPAttributes._from_msg(msg)
def chmod(self, mode):
"""
Change the mode (permissions) of this file. The permissions are
unix-style and identical to those used by Python's `os.chmod`
function.
:param int mode: new permissions
"""
self.sftp._log(
DEBUG, "chmod({}, {!r})".format(hexlify(self.handle), mode)
)
attr = SFTPAttributes()
attr.st_mode = mode
self.sftp._request(CMD_FSETSTAT, self.handle, attr)
def chown(self, uid, gid):
"""
Change the owner (``uid``) and group (``gid``) of this file. As with
Python's `os.chown` function, you must pass both arguments, so if you
only want to change one, use `stat` first to retrieve the current
owner and group.
:param int uid: new owner's uid
:param int gid: new group id
"""
self.sftp._log(
DEBUG,
"chown({}, {!r}, {!r})".format(hexlify(self.handle), uid, gid),
)
attr = SFTPAttributes()
attr.st_uid, attr.st_gid = uid, gid
self.sftp._request(CMD_FSETSTAT, self.handle, attr)
def utime(self, times):
"""
Set the access and modified times of this file. If
``times`` is ``None``, then the file's access and modified times are
set to the current time. Otherwise, ``times`` must be a 2-tuple of
numbers, of the form ``(atime, mtime)``, which is used to set the
access and modified times, respectively. This bizarre API is mimicked
from Python for the sake of consistency -- I apologize.
:param tuple times:
``None`` or a tuple of (access time, modified time) in standard
internet epoch time (seconds since 01 January 1970 GMT)
"""
if times is None:
times = (time.time(), time.time())
self.sftp._log(
DEBUG, "utime({}, {!r})".format(hexlify(self.handle), times)
)
attr = SFTPAttributes()
attr.st_atime, attr.st_mtime = times
self.sftp._request(CMD_FSETSTAT, self.handle, attr)
def truncate(self, size):
"""
Change the size of this file. This usually extends
or shrinks the size of the file, just like the ``truncate()`` method on
Python file objects.
:param size: the new size of the file
"""
self.sftp._log(
DEBUG, "truncate({}, {!r})".format(hexlify(self.handle), size)
)
attr = SFTPAttributes()
attr.st_size = size
self.sftp._request(CMD_FSETSTAT, self.handle, attr)
def check(self, hash_algorithm, offset=0, length=0, block_size=0):
"""
Ask the server for a hash of a section of this file. This can be used
to verify a successful upload or download, or for various rsync-like
operations.
The file is hashed from ``offset``, for ``length`` bytes.
If ``length`` is 0, the remainder of the file is hashed. Thus, if both
``offset`` and ``length`` are zero, the entire file is hashed.
Normally, ``block_size`` will be 0 (the default), and this method will
return a byte string representing the requested hash (for example, a
string of length 16 for MD5, or 20 for SHA-1). If a non-zero
``block_size`` is given, each chunk of the file (from ``offset`` to
``offset + length``) of ``block_size`` bytes is computed as a separate
hash. The hash results are all concatenated and returned as a single
string.
For example, ``check('sha1', 0, 1024, 512)`` will return a string of
length 40. The first 20 bytes will be the SHA-1 of the first 512 bytes
of the file, and the last 20 bytes will be the SHA-1 of the next 512
bytes.
:param str hash_algorithm:
the name of the hash algorithm to use (normally ``"sha1"`` or
``"md5"``)
:param offset:
offset into the file to begin hashing (0 means to start from the
beginning)
:param length:
number of bytes to hash (0 means continue to the end of the file)
:param int block_size:
number of bytes to hash per result (must not be less than 256; 0
means to compute only one hash of the entire segment)
:return:
`str` of bytes representing the hash of each block, concatenated
together
:raises:
``IOError`` -- if the server doesn't support the "check-file"
extension, or possibly doesn't support the hash algorithm requested
.. note:: Many (most?) servers don't support this extension yet.
.. versionadded:: 1.4
"""
t, msg = self.sftp._request(
CMD_EXTENDED,
"check-file",
self.handle,
hash_algorithm,
int64(offset),
int64(length),
block_size,
)
msg.get_text() # ext
msg.get_text() # alg
data = msg.get_remainder()
return data
def set_pipelined(self, pipelined=True):
"""
Turn on/off the pipelining of write operations to this file. When
pipelining is on, paramiko won't wait for the server response after
each write operation. Instead, they're collected as they come in. At
the first non-write operation (including `.close`), all remaining
server responses are collected. This means that if there was an error
with one of your later writes, an exception might be thrown from within
`.close` instead of `.write`.
By default, files are not pipelined.
:param bool pipelined:
``True`` if pipelining should be turned on for this file; ``False``
otherwise
.. versionadded:: 1.5
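A sketch of typical use (``data_chunks`` is a hypothetical iterable of
bytes)::

    f.set_pipelined(True)
    for chunk in data_chunks:
        f.write(chunk)          # no round trip per write
    f.close()                   # errors from earlier writes may surface here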
"""
self.pipelined = pipelined
def prefetch(self, file_size=None, max_concurrent_requests=None):
"""
Pre-fetch the remaining contents of this file in anticipation of future
`.read` calls. If reading the entire file, pre-fetching can
dramatically improve the download speed by avoiding roundtrip latency.
The file's contents are incrementally buffered in a background thread.
The prefetched data is stored in a buffer until read via the `.read`
method. Once data has been read, it's removed from the buffer. The
data may be read in a random order (using `.seek`); chunks of the
buffer that haven't been read will continue to be buffered.
:param int file_size:
When this is ``None`` (the default), this method calls `stat` to
determine the remote file size. In some situations, doing so can
cause exceptions or hangs (see `#562
<https://github.com/paramiko/paramiko/pull/562>`_); as a
workaround, one may call `stat` explicitly and pass its value in
via this parameter.
:param int max_concurrent_requests:
The maximum number of concurrent read requests to prefetch. See
`.SFTPClient.get` (its ``max_concurrent_prefetch_requests`` param)
for details.
.. versionadded:: 1.5.1
.. versionchanged:: 1.16.0
The ``file_size`` parameter was added (with no default value).
.. versionchanged:: 1.16.1
The ``file_size`` parameter was made optional for backwards
compatibility.
.. versionchanged:: 3.3
Added ``max_concurrent_requests``.
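A minimal sketch for downloading a whole file (assumes ``f`` is open for
reading)::

    size = f.stat().st_size
    f.prefetch(size)            # pass the size in to avoid a second stat()
    data = f.read(size)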
"""
if file_size is None:
file_size = self.stat().st_size
# queue up async reads for the rest of the file
chunks = []
n = self._realpos
while n < file_size:
chunk = min(self.MAX_REQUEST_SIZE, file_size - n)
chunks.append((n, chunk))
n += chunk
if len(chunks) > 0:
self._start_prefetch(chunks, max_concurrent_requests)
def readv(self, chunks, max_concurrent_prefetch_requests=None):
"""
Read a set of blocks from the file by (offset, length). This is more
efficient than doing a series of `.seek` and `.read` calls, since the
prefetch machinery is used to retrieve all the requested blocks at
once.
:param chunks:
a list of ``(offset, length)`` tuples indicating which sections of
the file to read
:param int max_concurrent_prefetch_requests:
The maximum number of concurrent read requests to prefetch. See
`.SFTPClient.get` (its ``max_concurrent_prefetch_requests`` param)
for details.
:return: a list of blocks read, in the same order as in ``chunks``
.. versionadded:: 1.5.4
.. versionchanged:: 3.3
Added ``max_concurrent_prefetch_requests``.
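Example sketch (offsets and lengths are illustrative)::

    for block in f.readv([(0, 1024), (65536, 512)]):
        handle_block(block)     # ``handle_block`` stands in for your own code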
"""
self.sftp._log(
DEBUG, "readv({}, {!r})".format(hexlify(self.handle), chunks)
)
read_chunks = []
for offset, size in chunks:
# don't fetch data that's already in the prefetch buffer
if self._data_in_prefetch_buffers(
offset
) or self._data_in_prefetch_requests(offset, size):
continue
# break up anything larger than the max read size
while size > 0:
chunk_size = min(size, self.MAX_REQUEST_SIZE)
read_chunks.append((offset, chunk_size))
offset += chunk_size
size -= chunk_size
self._start_prefetch(read_chunks, max_concurrent_prefetch_requests)
# now we can just devolve to a bunch of read()s :)
for x in chunks:
self.seek(x[0])
yield self.read(x[1])
# ...internals...
def _get_size(self):
try:
return self.stat().st_size
except:
return 0
def _start_prefetch(self, chunks, max_concurrent_requests=None):
self._prefetching = True
self._prefetch_done = False
t = threading.Thread(
target=self._prefetch_thread,
args=(chunks, max_concurrent_requests),
)
t.daemon = True
t.start()
def _prefetch_thread(self, chunks, max_concurrent_requests):
# do these read requests in a temporary thread because there may be
# a lot of them, so it may block.
for offset, length in chunks:
# Limit the number of concurrent requests in a busy-loop
if max_concurrent_requests is not None:
while True:
with self._prefetch_lock:
pf_len = len(self._prefetch_extents)
if pf_len < max_concurrent_requests:
break
time.sleep(io_sleep)
num = self.sftp._async_request(
self, CMD_READ, self.handle, int64(offset), int(length)
)
with self._prefetch_lock:
self._prefetch_extents[num] = (offset, length)
def _async_response(self, t, msg, num):
if t == CMD_STATUS:
# save exception and re-raise it on next file operation
try:
self.sftp._convert_status(msg)
except Exception as e:
self._saved_exception = e
return
if t != CMD_DATA:
raise SFTPError("Expected data")
data = msg.get_string()
while True:
with self._prefetch_lock:
# spin if in race with _prefetch_thread
if num in self._prefetch_extents:
offset, length = self._prefetch_extents[num]
self._prefetch_data[offset] = data
del self._prefetch_extents[num]
if len(self._prefetch_extents) == 0:
self._prefetch_done = True
break
def _check_exception(self):
"""if there's a saved exception, raise & clear it"""
if self._saved_exception is not None:
x = self._saved_exception
self._saved_exception = None
raise x

View File

@ -0,0 +1,196 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Abstraction of an SFTP file handle (for server mode).
"""
import os
from paramiko.sftp import SFTP_OP_UNSUPPORTED, SFTP_OK
from paramiko.util import ClosingContextManager
class SFTPHandle(ClosingContextManager):
"""
Abstract object representing a handle to an open file (or folder) in an
SFTP server implementation. Each handle has a string representation used
by the client to refer to the underlying file.
Server implementations can (and should) subclass SFTPHandle to implement
features of a file handle, like `stat` or `chattr`.
Instances of this class may be used as context managers.
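A minimal illustrative subclass (not part of paramiko itself), serving a
local file read-only via the default `read` and `close` implementations::

    class ReadOnlyHandle(SFTPHandle):
        def __init__(self, path, flags=0):
            super().__init__(flags)
            # the default read()/close() look for this attribute
            self.readfile = open(path, "rb")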
"""
def __init__(self, flags=0):
"""
Create a new file handle representing a local file being served over
SFTP. If ``flags`` is passed in, it's used to determine if the file
is open in append mode.
:param int flags: optional flags as passed to
`.SFTPServerInterface.open`
"""
self.__flags = flags
self.__name = None
# only for handles to folders:
self.__files = {}
self.__tell = None
def close(self):
"""
When a client closes a file, this method is called on the handle.
Normally you would use this method to close the underlying OS level
file object(s).
The default implementation checks for attributes on ``self`` named
``readfile`` and/or ``writefile``, and if either or both are present,
their ``close()`` methods are called. This means that if you are
using the default implementations of `read` and `write`, this
method's default implementation should be fine also.
"""
readfile = getattr(self, "readfile", None)
if readfile is not None:
readfile.close()
writefile = getattr(self, "writefile", None)
if writefile is not None:
writefile.close()
def read(self, offset, length):
"""
Read up to ``length`` bytes from this file, starting at position
``offset``. The offset may be a Python long, since SFTP allows it
to be 64 bits.
If the end of the file has been reached, this method may return an
empty string to signify EOF, or it may also return ``SFTP_EOF``.
The default implementation checks for an attribute on ``self`` named
``readfile``, and if present, performs the read operation on the Python
file-like object found there. (This is meant as a time saver for the
common case where you are wrapping a Python file object.)
:param offset: position in the file to start reading from.
:param int length: number of bytes to attempt to read.
:return: the `bytes` read, or an error code `int`.
"""
readfile = getattr(self, "readfile", None)
if readfile is None:
return SFTP_OP_UNSUPPORTED
try:
if self.__tell is None:
self.__tell = readfile.tell()
if offset != self.__tell:
readfile.seek(offset)
self.__tell = offset
data = readfile.read(length)
except IOError as e:
self.__tell = None
return SFTPServer.convert_errno(e.errno)
self.__tell += len(data)
return data
def write(self, offset, data):
"""
Write ``data`` into this file at position ``offset``. Extending the
file past its original end is expected. Unlike Python's normal
``write()`` methods, this method cannot do a partial write: it must
write all of ``data`` or else return an error.
The default implementation checks for an attribute on ``self`` named
``writefile``, and if present, performs the write operation on the
Python file-like object found there. The attribute is named
differently from ``readfile`` to make it easy to implement read-only
(or write-only) files, but if both attributes are present, they should
refer to the same file.
:param offset: position in the file to start writing at.
:param bytes data: data to write into the file.
:return: an SFTP error code like ``SFTP_OK``.
"""
writefile = getattr(self, "writefile", None)
if writefile is None:
return SFTP_OP_UNSUPPORTED
try:
# in append mode, don't care about seeking
if (self.__flags & os.O_APPEND) == 0:
if self.__tell is None:
self.__tell = writefile.tell()
if offset != self.__tell:
writefile.seek(offset)
self.__tell = offset
writefile.write(data)
writefile.flush()
except IOError as e:
self.__tell = None
return SFTPServer.convert_errno(e.errno)
if self.__tell is not None:
self.__tell += len(data)
return SFTP_OK
def stat(self):
"""
Return an `.SFTPAttributes` object referring to this open file, or an
error code. This is equivalent to `.SFTPServerInterface.stat`, except
it's called on an open file instead of a path.
:return:
an attributes object for the given file, or an SFTP error code
(like ``SFTP_PERMISSION_DENIED``).
:rtype: `.SFTPAttributes` or error code
"""
return SFTP_OP_UNSUPPORTED
def chattr(self, attr):
"""
Change the attributes of this file. The ``attr`` object will contain
only those fields provided by the client in its request, so you should
check for the presence of fields before using them.
:param .SFTPAttributes attr: the attributes to change on this file.
:return: an `int` error code like ``SFTP_OK``.
"""
return SFTP_OP_UNSUPPORTED
# ...internals...
def _set_files(self, files):
"""
Used by the SFTP server code to cache a directory listing. (In
the SFTP protocol, listing a directory is a multi-stage process
requiring a temporary handle.)
"""
self.__files = files
def _get_next_files(self):
"""
Used by the SFTP server code to retrieve a cached directory
listing.
"""
fnlist = self.__files[:16]
self.__files = self.__files[16:]
return fnlist
def _get_name(self):
return self.__name
def _set_name(self, name):
self.__name = name
from paramiko.sftp_server import SFTPServer

View File

@ -0,0 +1,537 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Server-mode SFTP support.
"""
import os
import errno
import sys
from hashlib import md5, sha1
from paramiko import util
from paramiko.sftp import (
BaseSFTP,
Message,
SFTP_FAILURE,
SFTP_PERMISSION_DENIED,
SFTP_NO_SUCH_FILE,
int64,
)
from paramiko.sftp_si import SFTPServerInterface
from paramiko.sftp_attr import SFTPAttributes
from paramiko.common import DEBUG
from paramiko.server import SubsystemHandler
from paramiko.util import b
# known hash algorithms for the "check-file" extension
from paramiko.sftp import (
CMD_HANDLE,
SFTP_DESC,
CMD_STATUS,
SFTP_EOF,
CMD_NAME,
SFTP_BAD_MESSAGE,
CMD_EXTENDED_REPLY,
SFTP_FLAG_READ,
SFTP_FLAG_WRITE,
SFTP_FLAG_APPEND,
SFTP_FLAG_CREATE,
SFTP_FLAG_TRUNC,
SFTP_FLAG_EXCL,
CMD_NAMES,
CMD_OPEN,
CMD_CLOSE,
SFTP_OK,
CMD_READ,
CMD_DATA,
CMD_WRITE,
CMD_REMOVE,
CMD_RENAME,
CMD_MKDIR,
CMD_RMDIR,
CMD_OPENDIR,
CMD_READDIR,
CMD_STAT,
CMD_ATTRS,
CMD_LSTAT,
CMD_FSTAT,
CMD_SETSTAT,
CMD_FSETSTAT,
CMD_READLINK,
CMD_SYMLINK,
CMD_REALPATH,
CMD_EXTENDED,
SFTP_OP_UNSUPPORTED,
)
_hash_class = {"sha1": sha1, "md5": md5}
class SFTPServer(BaseSFTP, SubsystemHandler):
"""
Server-side SFTP subsystem support. Since this is a `.SubsystemHandler`,
it can be (and is meant to be) set as the handler for ``"sftp"`` requests.
Use `.Transport.set_subsystem_handler` to activate this class.
"""
def __init__(
self,
channel,
name,
server,
sftp_si=SFTPServerInterface,
*args,
**kwargs
):
"""
The constructor for SFTPServer is meant to be called from within the
`.Transport` as a subsystem handler. ``server`` and any additional
parameters or keyword parameters are passed from the original call to
`.Transport.set_subsystem_handler`.
:param .Channel channel: channel passed from the `.Transport`.
:param str name: name of the requested subsystem.
:param .ServerInterface server:
the server object associated with this channel and subsystem
:param sftp_si:
a subclass of `.SFTPServerInterface` to use for handling individual
requests.
"""
BaseSFTP.__init__(self)
SubsystemHandler.__init__(self, channel, name, server)
transport = channel.get_transport()
self.logger = util.get_logger(transport.get_log_channel() + ".sftp")
self.ultra_debug = transport.get_hexdump()
self.next_handle = 1
# map of handle-string to SFTPHandle for files & folders:
self.file_table = {}
self.folder_table = {}
self.server = sftp_si(server, *args, **kwargs)
def _log(self, level, msg):
if issubclass(type(msg), list):
for m in msg:
super()._log(level, "[chan " + self.sock.get_name() + "] " + m)
else:
super()._log(level, "[chan " + self.sock.get_name() + "] " + msg)
def start_subsystem(self, name, transport, channel):
self.sock = channel
self._log(DEBUG, "Started sftp server on channel {!r}".format(channel))
self._send_server_version()
self.server.session_started()
while True:
try:
t, data = self._read_packet()
except EOFError:
self._log(DEBUG, "EOF -- end of session")
return
except Exception as e:
self._log(DEBUG, "Exception on channel: " + str(e))
self._log(DEBUG, util.tb_strings())
return
msg = Message(data)
request_number = msg.get_int()
try:
self._process(t, request_number, msg)
except Exception as e:
self._log(DEBUG, "Exception in server processing: " + str(e))
self._log(DEBUG, util.tb_strings())
# send some kind of failure message, at least
try:
self._send_status(request_number, SFTP_FAILURE)
except:
pass
def finish_subsystem(self):
self.server.session_ended()
super().finish_subsystem()
# close any file handles that were left open
# (so we can return them to the OS quickly)
for f in self.file_table.values():
f.close()
for f in self.folder_table.values():
f.close()
self.file_table = {}
self.folder_table = {}
@staticmethod
def convert_errno(e):
"""
Convert an errno value (as from an ``OSError`` or ``IOError``) into a
standard SFTP result code. This is a convenience function for trapping
exceptions in server code and returning an appropriate result.
:param int e: an errno code, as from ``OSError.errno``.
:return: an `int` SFTP error code like ``SFTP_NO_SUCH_FILE``.
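Typical use in server code (a sketch; ``path`` is illustrative)::

    try:
        os.remove(path)
    except OSError as e:
        return SFTPServer.convert_errno(e.errno)
    return SFTP_OK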
"""
if e == errno.EACCES:
# permission denied
return SFTP_PERMISSION_DENIED
elif (e == errno.ENOENT) or (e == errno.ENOTDIR):
# no such file
return SFTP_NO_SUCH_FILE
else:
return SFTP_FAILURE
@staticmethod
def set_file_attr(filename, attr):
"""
Change a file's attributes on the local filesystem. The contents of
``attr`` are used to change the permissions, owner, group ownership,
and/or modification & access time of the file, depending on which
attributes are present in ``attr``.
This is meant to be a handy helper function for translating SFTP file
requests into local file operations.
:param str filename:
name of the file to alter (should usually be an absolute path).
:param .SFTPAttributes attr: attributes to change.
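For example, an `.SFTPServerInterface.chattr` implementation might be
sketched as::

    def chattr(self, path, attr):
        try:
            SFTPServer.set_file_attr(path, attr)
        except OSError as e:
            return SFTPServer.convert_errno(e.errno)
        return SFTP_OK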
"""
if sys.platform != "win32":
# mode operations are meaningless on win32
if attr._flags & attr.FLAG_PERMISSIONS:
os.chmod(filename, attr.st_mode)
if attr._flags & attr.FLAG_UIDGID:
os.chown(filename, attr.st_uid, attr.st_gid)
if attr._flags & attr.FLAG_AMTIME:
os.utime(filename, (attr.st_atime, attr.st_mtime))
if attr._flags & attr.FLAG_SIZE:
with open(filename, "w+") as f:
f.truncate(attr.st_size)
# ...internals...
def _response(self, request_number, t, *args):
msg = Message()
msg.add_int(request_number)
for item in args:
# NOTE: this is a very silly tiny class used for SFTPFile mostly
if isinstance(item, int64):
msg.add_int64(item)
elif isinstance(item, int):
msg.add_int(item)
elif isinstance(item, (str, bytes)):
msg.add_string(item)
elif type(item) is SFTPAttributes:
item._pack(msg)
else:
raise Exception(
"unknown type for {!r} type {!r}".format(item, type(item))
)
self._send_packet(t, msg)
def _send_handle_response(self, request_number, handle, folder=False):
if not issubclass(type(handle), SFTPHandle):
# must be error code
self._send_status(request_number, handle)
return
handle._set_name(b("hx{:d}".format(self.next_handle)))
self.next_handle += 1
if folder:
self.folder_table[handle._get_name()] = handle
else:
self.file_table[handle._get_name()] = handle
self._response(request_number, CMD_HANDLE, handle._get_name())
def _send_status(self, request_number, code, desc=None):
if desc is None:
try:
desc = SFTP_DESC[code]
except IndexError:
desc = "Unknown"
# some clients expect a "language" tag at the end
# (but don't mind it being blank)
self._response(request_number, CMD_STATUS, code, desc, "")
def _open_folder(self, request_number, path):
resp = self.server.list_folder(path)
if issubclass(type(resp), list):
# got an actual list of filenames in the folder
folder = SFTPHandle()
folder._set_files(resp)
self._send_handle_response(request_number, folder, True)
return
# must be an error code
self._send_status(request_number, resp)
def _read_folder(self, request_number, folder):
flist = folder._get_next_files()
if len(flist) == 0:
self._send_status(request_number, SFTP_EOF)
return
msg = Message()
msg.add_int(request_number)
msg.add_int(len(flist))
for attr in flist:
msg.add_string(attr.filename)
msg.add_string(attr)
attr._pack(msg)
self._send_packet(CMD_NAME, msg)
def _check_file(self, request_number, msg):
# this extension actually comes from v6 protocol, but since it's an
# extension, I feel like we can reasonably support it as a backport.
# it's very useful for verifying uploaded files or checking for
# rsync-like differences between local and remote files.
handle = msg.get_binary()
alg_list = msg.get_list()
start = msg.get_int64()
length = msg.get_int64()
block_size = msg.get_int()
if handle not in self.file_table:
self._send_status(
request_number, SFTP_BAD_MESSAGE, "Invalid handle"
)
return
f = self.file_table[handle]
for x in alg_list:
if x in _hash_class:
algname = x
alg = _hash_class[x]
break
else:
self._send_status(
request_number, SFTP_FAILURE, "No supported hash types found"
)
return
if length == 0:
st = f.stat()
if not issubclass(type(st), SFTPAttributes):
self._send_status(request_number, st, "Unable to stat file")
return
length = st.st_size - start
if block_size == 0:
block_size = length
if block_size < 256:
self._send_status(
request_number, SFTP_FAILURE, "Block size too small"
)
return
sum_out = bytes()
offset = start
while offset < start + length:
blocklen = min(block_size, start + length - offset)
# don't try to read more than about 64KB at a time
chunklen = min(blocklen, 65536)
count = 0
hash_obj = alg()
while count < blocklen:
data = f.read(offset, chunklen)
if not isinstance(data, bytes):
self._send_status(
request_number, data, "Unable to hash file"
)
return
hash_obj.update(data)
count += len(data)
offset += count
sum_out += hash_obj.digest()
msg = Message()
msg.add_int(request_number)
msg.add_string("check-file")
msg.add_string(algname)
msg.add_bytes(sum_out)
self._send_packet(CMD_EXTENDED_REPLY, msg)
def _convert_pflags(self, pflags):
"""convert SFTP-style open() flags to Python's os.open() flags"""
if (pflags & SFTP_FLAG_READ) and (pflags & SFTP_FLAG_WRITE):
flags = os.O_RDWR
elif pflags & SFTP_FLAG_WRITE:
flags = os.O_WRONLY
else:
flags = os.O_RDONLY
if pflags & SFTP_FLAG_APPEND:
flags |= os.O_APPEND
if pflags & SFTP_FLAG_CREATE:
flags |= os.O_CREAT
if pflags & SFTP_FLAG_TRUNC:
flags |= os.O_TRUNC
if pflags & SFTP_FLAG_EXCL:
flags |= os.O_EXCL
return flags
def _process(self, t, request_number, msg):
self._log(DEBUG, "Request: {}".format(CMD_NAMES[t]))
if t == CMD_OPEN:
path = msg.get_text()
flags = self._convert_pflags(msg.get_int())
attr = SFTPAttributes._from_msg(msg)
self._send_handle_response(
request_number, self.server.open(path, flags, attr)
)
elif t == CMD_CLOSE:
handle = msg.get_binary()
if handle in self.folder_table:
del self.folder_table[handle]
self._send_status(request_number, SFTP_OK)
return
if handle in self.file_table:
self.file_table[handle].close()
del self.file_table[handle]
self._send_status(request_number, SFTP_OK)
return
self._send_status(
request_number, SFTP_BAD_MESSAGE, "Invalid handle"
)
elif t == CMD_READ:
handle = msg.get_binary()
offset = msg.get_int64()
length = msg.get_int()
if handle not in self.file_table:
self._send_status(
request_number, SFTP_BAD_MESSAGE, "Invalid handle"
)
return
data = self.file_table[handle].read(offset, length)
if isinstance(data, (bytes, str)):
if len(data) == 0:
self._send_status(request_number, SFTP_EOF)
else:
self._response(request_number, CMD_DATA, data)
else:
self._send_status(request_number, data)
elif t == CMD_WRITE:
handle = msg.get_binary()
offset = msg.get_int64()
data = msg.get_binary()
if handle not in self.file_table:
self._send_status(
request_number, SFTP_BAD_MESSAGE, "Invalid handle"
)
return
self._send_status(
request_number, self.file_table[handle].write(offset, data)
)
elif t == CMD_REMOVE:
path = msg.get_text()
self._send_status(request_number, self.server.remove(path))
elif t == CMD_RENAME:
oldpath = msg.get_text()
newpath = msg.get_text()
self._send_status(
request_number, self.server.rename(oldpath, newpath)
)
elif t == CMD_MKDIR:
path = msg.get_text()
attr = SFTPAttributes._from_msg(msg)
self._send_status(request_number, self.server.mkdir(path, attr))
elif t == CMD_RMDIR:
path = msg.get_text()
self._send_status(request_number, self.server.rmdir(path))
elif t == CMD_OPENDIR:
path = msg.get_text()
self._open_folder(request_number, path)
return
elif t == CMD_READDIR:
handle = msg.get_binary()
if handle not in self.folder_table:
self._send_status(
request_number, SFTP_BAD_MESSAGE, "Invalid handle"
)
return
folder = self.folder_table[handle]
self._read_folder(request_number, folder)
elif t == CMD_STAT:
path = msg.get_text()
resp = self.server.stat(path)
if issubclass(type(resp), SFTPAttributes):
self._response(request_number, CMD_ATTRS, resp)
else:
self._send_status(request_number, resp)
elif t == CMD_LSTAT:
path = msg.get_text()
resp = self.server.lstat(path)
if issubclass(type(resp), SFTPAttributes):
self._response(request_number, CMD_ATTRS, resp)
else:
self._send_status(request_number, resp)
elif t == CMD_FSTAT:
handle = msg.get_binary()
if handle not in self.file_table:
self._send_status(
request_number, SFTP_BAD_MESSAGE, "Invalid handle"
)
return
resp = self.file_table[handle].stat()
if issubclass(type(resp), SFTPAttributes):
self._response(request_number, CMD_ATTRS, resp)
else:
self._send_status(request_number, resp)
elif t == CMD_SETSTAT:
path = msg.get_text()
attr = SFTPAttributes._from_msg(msg)
self._send_status(request_number, self.server.chattr(path, attr))
elif t == CMD_FSETSTAT:
handle = msg.get_binary()
attr = SFTPAttributes._from_msg(msg)
if handle not in self.file_table:
self._response(
request_number, SFTP_BAD_MESSAGE, "Invalid handle"
)
return
self._send_status(
request_number, self.file_table[handle].chattr(attr)
)
elif t == CMD_READLINK:
path = msg.get_text()
resp = self.server.readlink(path)
if isinstance(resp, (bytes, str)):
self._response(
request_number, CMD_NAME, 1, resp, "", SFTPAttributes()
)
else:
self._send_status(request_number, resp)
elif t == CMD_SYMLINK:
# the sftp 2 draft is incorrect here!
# path always follows target_path
target_path = msg.get_text()
path = msg.get_text()
self._send_status(
request_number, self.server.symlink(target_path, path)
)
elif t == CMD_REALPATH:
path = msg.get_text()
rpath = self.server.canonicalize(path)
self._response(
request_number, CMD_NAME, 1, rpath, "", SFTPAttributes()
)
elif t == CMD_EXTENDED:
tag = msg.get_text()
if tag == "check-file":
self._check_file(request_number, msg)
elif tag == "posix-rename@openssh.com":
oldpath = msg.get_text()
newpath = msg.get_text()
self._send_status(
request_number, self.server.posix_rename(oldpath, newpath)
)
else:
self._send_status(request_number, SFTP_OP_UNSUPPORTED)
else:
self._send_status(request_number, SFTP_OP_UNSUPPORTED)
from paramiko.sftp_handle import SFTPHandle

View File

@ -0,0 +1,316 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
An interface to override for SFTP server support.
"""
import os
import sys
from paramiko.sftp import SFTP_OP_UNSUPPORTED
class SFTPServerInterface:
"""
This class defines an interface for controlling the behavior of paramiko
when using the `.SFTPServer` subsystem to provide an SFTP server.
Methods on this class are called from the SFTP session's thread, so you can
block as long as necessary without affecting other sessions (even other
SFTP sessions). However, raising an exception will usually cause the SFTP
session to abruptly end, so you will usually want to catch exceptions and
return an appropriate error code.
All paths are in string form instead of unicode because not all SFTP
clients & servers obey the requirement that paths be encoded in UTF-8.
"""
def __init__(self, server, *args, **kwargs):
"""
Create a new SFTPServerInterface object. This method does nothing by
default and is meant to be overridden by subclasses.
:param .ServerInterface server:
the server object associated with this channel and SFTP subsystem
"""
super().__init__(*args, **kwargs)
def session_started(self):
"""
The SFTP server session has just started. This method is meant to be
overridden to perform any necessary setup before handling callbacks
from SFTP operations.
"""
pass
def session_ended(self):
"""
The SFTP server session has just ended, either cleanly or via an
exception. This method is meant to be overridden to perform any
necessary cleanup before this `.SFTPServerInterface` object is
destroyed.
"""
pass
def open(self, path, flags, attr):
"""
Open a file on the server and create a handle for future operations
on that file. On success, a new object subclassed from `.SFTPHandle`
should be returned. This handle will be used for future operations
on the file (read, write, etc). On failure, an error code such as
``SFTP_PERMISSION_DENIED`` should be returned.
``flags`` contains the requested mode for opening (read-only,
write-append, etc) as a bitset of flags from the ``os`` module:
- ``os.O_RDONLY``
- ``os.O_WRONLY``
- ``os.O_RDWR``
- ``os.O_APPEND``
- ``os.O_CREAT``
- ``os.O_TRUNC``
- ``os.O_EXCL``
(One of ``os.O_RDONLY``, ``os.O_WRONLY``, or ``os.O_RDWR`` will always
be set.)
The ``attr`` object contains requested attributes of the file if it
has to be created. Some or all attribute fields may be missing if
the client didn't specify them.
.. note:: The SFTP protocol defines all files to be in "binary" mode.
There is no equivalent to Python's "text" mode.
:param str path:
the requested path (relative or absolute) of the file to be opened.
:param int flags:
flags or'd together from the ``os`` module indicating the requested
mode for opening the file.
:param .SFTPAttributes attr:
requested attributes of the file if it is newly created.
:return: a new `.SFTPHandle` or error code.
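A bare-bones read-only sketch backed by the local filesystem (assumes
`.SFTPServer` and `.SFTPHandle` are imported; a real implementation must
honor ``flags`` and sanitize ``path``)::

    def open(self, path, flags, attr):
        try:
            fileobj = open(path, "rb")
        except OSError as e:
            return SFTPServer.convert_errno(e.errno)
        handle = SFTPHandle(flags)
        handle.readfile = fileobj   # used by SFTPHandle's default read()
        return handle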
"""
return SFTP_OP_UNSUPPORTED
def list_folder(self, path):
"""
Return a list of files within a given folder. The ``path`` will use
posix notation (``"/"`` separates folder names) and may be an absolute
or relative path.
The list of files is expected to be a list of `.SFTPAttributes`
objects, which are similar in structure to the objects returned by
``os.stat``. In addition, each object should have its ``filename``
field filled in, since this is important to a directory listing and
not normally present in ``os.stat`` results. The method
`.SFTPAttributes.from_stat` will usually do what you want.
In case of an error, you should return one of the ``SFTP_*`` error
codes, such as ``SFTP_PERMISSION_DENIED``.
:param str path: the requested path (relative or absolute) to be
listed.
:return:
a list of the files in the given folder, using `.SFTPAttributes`
objects.
.. note::
You should normalize the given ``path`` first (see the `os.path`
module) and check appropriate permissions before returning the list
of files. Be careful of malicious clients attempting to use
relative paths to escape restricted folders, if you're doing a
direct translation from the SFTP server path to your local
filesystem.
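A minimal sketch using `.SFTPAttributes.from_stat` (assumes
`.SFTPAttributes` is imported; path sanitization is omitted)::

    def list_folder(self, path):
        out = []
        for fname in os.listdir(path):
            st = os.stat(os.path.join(path, fname))
            out.append(SFTPAttributes.from_stat(st, fname))
        return out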
"""
return SFTP_OP_UNSUPPORTED
def stat(self, path):
"""
Return an `.SFTPAttributes` object for a path on the server, or an
error code. If your server supports symbolic links (also known as
"aliases"), you should follow them. (`lstat` is the corresponding
call that doesn't follow symlinks/aliases.)
:param str path:
the requested path (relative or absolute) to fetch file statistics
for.
:return:
an `.SFTPAttributes` object for the given file, or an SFTP error
code (like ``SFTP_PERMISSION_DENIED``).
"""
return SFTP_OP_UNSUPPORTED
def lstat(self, path):
"""
Return an `.SFTPAttributes` object for a path on the server, or an
error code. If your server supports symbolic links (also known as
"aliases"), you should not follow them -- instead, you should
return data on the symlink or alias itself. (`stat` is the
corresponding call that follows symlinks/aliases.)
:param str path:
the requested path (relative or absolute) to fetch file statistics
for.
:type path: str
:return:
an `.SFTPAttributes` object for the given file, or an SFTP error
code (like ``SFTP_PERMISSION_DENIED``).
"""
return SFTP_OP_UNSUPPORTED
def remove(self, path):
"""
Delete a file, if possible.
:param str path:
the requested path (relative or absolute) of the file to delete.
:return: an SFTP error code `int` like ``SFTP_OK``.
"""
return SFTP_OP_UNSUPPORTED
def rename(self, oldpath, newpath):
"""
Rename (or move) a file. The SFTP specification implies that this
method can be used to move an existing file into a different folder,
and since there's no other (easy) way to move files via SFTP, it's
probably a good idea to implement "move" in this method too, even for
files that cross disk partition boundaries, if at all possible.
.. note:: You should return an error if a file with the same name as
``newpath`` already exists. (The rename operation should be
non-destructive.)
.. note::
This method implements 'standard' SFTP ``RENAME`` behavior; those
seeking the OpenSSH "POSIX rename" extension behavior should use
`posix_rename`.
:param str oldpath:
the requested path (relative or absolute) of the existing file.
:param str newpath: the requested new path of the file.
:return: an SFTP error code `int` like ``SFTP_OK``.
"""
return SFTP_OP_UNSUPPORTED
def posix_rename(self, oldpath, newpath):
"""
Rename (or move) a file, following posix conventions. If newpath
already exists, it will be overwritten.
:param str oldpath:
the requested path (relative or absolute) of the existing file.
:param str newpath: the requested new path of the file.
:return: an SFTP error code `int` like ``SFTP_OK``.
.. versionadded:: 2.2
"""
return SFTP_OP_UNSUPPORTED
def mkdir(self, path, attr):
"""
Create a new directory with the given attributes. The ``attr``
object may be considered a "hint" and ignored.
The ``attr`` object will contain only those fields provided by the
client in its request, so you should use ``hasattr`` to check for
the presence of fields before using them. In some cases, the ``attr``
object may be completely empty.
:param str path:
requested path (relative or absolute) of the new folder.
:param .SFTPAttributes attr: requested attributes of the new folder.
:return: an SFTP error code `int` like ``SFTP_OK``.
"""
return SFTP_OP_UNSUPPORTED
def rmdir(self, path):
"""
Remove a directory if it exists. The ``path`` should refer to an
existing, empty folder -- otherwise this method should return an
error.
:param str path:
requested path (relative or absolute) of the folder to remove.
:return: an SFTP error code `int` like ``SFTP_OK``.
"""
return SFTP_OP_UNSUPPORTED
def chattr(self, path, attr):
"""
Change the attributes of a file. The ``attr`` object will contain
only those fields provided by the client in its request, so you
should check for the presence of fields before using them.
:param str path:
requested path (relative or absolute) of the file to change.
:param attr:
requested attributes to change on the file (an `.SFTPAttributes`
object)
:return: an error code `int` like ``SFTP_OK``.
"""
return SFTP_OP_UNSUPPORTED
def canonicalize(self, path):
"""
Return the canonical form of a path on the server. For example,
if the server's home folder is ``/home/foo``, the path
``"../betty"`` would be canonicalized to ``"/home/betty"``. Note
the obvious security issues: if you're serving files only from a
specific folder, you probably don't want this method to reveal path
names outside that folder.
You may find the Python methods in ``os.path`` useful, especially
``os.path.normpath`` and ``os.path.realpath``.
The default implementation returns ``os.path.normpath('/' + path)``.
"""
if os.path.isabs(path):
out = os.path.normpath(path)
else:
out = os.path.normpath("/" + path)
if sys.platform == "win32":
# on windows, normalize backslashes to sftp/posix format
out = out.replace("\\", "/")
return out
def readlink(self, path):
"""
Return the target of a symbolic link (or shortcut) on the server.
If the specified path doesn't refer to a symbolic link, an error
should be returned.
:param str path: path (relative or absolute) of the symbolic link.
:return:
the target `str` path of the symbolic link, or an error code like
``SFTP_NO_SUCH_FILE``.
"""
return SFTP_OP_UNSUPPORTED
def symlink(self, target_path, path):
"""
Create a symbolic link on the server, as new pathname ``path``,
with ``target_path`` as the target of the link.
:param str target_path:
path (relative or absolute) of the target for this new symbolic
link.
:param str path:
path (relative or absolute) of the symbolic link to create.
:return: an error code `int` like ``SFTP_OK``.
"""
return SFTP_OP_UNSUPPORTED

View File

@ -0,0 +1,250 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import socket
class SSHException(Exception):
"""
Exception raised by failures in SSH2 protocol negotiation or logic errors.
"""
pass
class AuthenticationException(SSHException):
"""
Exception raised when authentication failed for some reason. It may be
possible to retry with different credentials. (Other classes specify more
specific reasons.)
.. versionadded:: 1.6
"""
pass
class PasswordRequiredException(AuthenticationException):
"""
Exception raised when a password is needed to unlock a private key file.
"""
pass
class BadAuthenticationType(AuthenticationException):
"""
Exception raised when an authentication type (like password) is used, but
the server isn't allowing that type. (It may only allow public-key, for
example.)
.. versionadded:: 1.1
"""
allowed_types = []
# TODO 4.0: remove explanation kwarg
def __init__(self, explanation, types):
# TODO 4.0: remove this supercall unless it's actually required for
# pickling (after fixing pickling)
AuthenticationException.__init__(self, explanation, types)
self.explanation = explanation
self.allowed_types = types
def __str__(self):
return "{}; allowed types: {!r}".format(
self.explanation, self.allowed_types
)
class PartialAuthentication(AuthenticationException):
"""
An internal exception thrown in the case of partial authentication.
"""
allowed_types = []
def __init__(self, types):
AuthenticationException.__init__(self, types)
self.allowed_types = types
def __str__(self):
return "Partial authentication; allowed types: {!r}".format(
self.allowed_types
)
# TODO 4.0: stop inheriting from SSHException, move to auth.py
class UnableToAuthenticate(AuthenticationException):
pass
class ChannelException(SSHException):
"""
Exception raised when an attempt to open a new `.Channel` fails.
:param int code: the error code returned by the server
.. versionadded:: 1.6
"""
def __init__(self, code, text):
SSHException.__init__(self, code, text)
self.code = code
self.text = text
def __str__(self):
return "ChannelException({!r}, {!r})".format(self.code, self.text)
class BadHostKeyException(SSHException):
"""
The host key given by the SSH server did not match what we were expecting.
:param str hostname: the hostname of the SSH server
:param PKey got_key: the host key presented by the server
:param PKey expected_key: the host key expected
.. versionadded:: 1.6
"""
def __init__(self, hostname, got_key, expected_key):
SSHException.__init__(self, hostname, got_key, expected_key)
self.hostname = hostname
self.key = got_key
self.expected_key = expected_key
def __str__(self):
msg = "Host key for server '{}' does not match: got '{}', expected '{}'" # noqa
return msg.format(
self.hostname,
self.key.get_base64(),
self.expected_key.get_base64(),
)
class IncompatiblePeer(SSHException):
"""
A disagreement arose regarding an algorithm required for key exchange.
.. versionadded:: 2.9
"""
# TODO 4.0: consider making this annotate w/ 1..N 'missing' algorithms,
# either just the first one that would halt kex, or even updating the
# Transport logic so we record /all/ that /could/ halt kex.
# TODO: update docstrings where this may end up raised so they are more
# specific.
pass
class ProxyCommandFailure(SSHException):
"""
The "ProxyCommand" found in the .ssh/config file returned an error.
:param str command: The command line that is generating this exception.
:param str error: The error captured from the proxy command output.
"""
def __init__(self, command, error):
SSHException.__init__(self, command, error)
self.command = command
self.error = error
def __str__(self):
return 'ProxyCommand("{}") returned nonzero exit status: {}'.format(
self.command, self.error
)
class NoValidConnectionsError(socket.error):
"""
Multiple connection attempts were made and no families succeeded.
This exception class wraps multiple "real" underlying connection errors,
all of which represent failed connection attempts. Because these errors are
not guaranteed to all be of the same error type (i.e. different errno,
`socket.error` subclass, message, etc) we expose a single unified error
message and a ``None`` errno so that instances of this class match most
normal handling of `socket.error` objects.
To see the wrapped exception objects, access the ``errors`` attribute.
``errors`` is a dict whose keys are address tuples (e.g. ``('127.0.0.1',
22)``) and whose values are the exception encountered trying to connect to
that address.
It is implied/assumed that all the errors given to a single instance of
this class are from connecting to the same hostname + port (and thus that
the differences are in the resolution of the hostname - e.g. IPv4 vs v6).
.. versionadded:: 1.16
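For example (a sketch assuming an `SSHClient` instance named ``client``)::

    try:
        client.connect("ssh.example.com", port=22)
    except NoValidConnectionsError as e:
        for addr, err in e.errors.items():
            print("failed on {}: {}".format(addr, err))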
"""
def __init__(self, errors):
"""
:param dict errors:
The errors dict to store, as described by class docstring.
"""
addrs = sorted(errors.keys())
body = ", ".join([x[0] for x in addrs[:-1]])
tail = addrs[-1][0]
if body:
msg = "Unable to connect to port {0} on {1} or {2}"
else:
msg = "Unable to connect to port {0} on {2}"
super().__init__(
None, msg.format(addrs[0][1], body, tail) # stand-in for errno
)
self.errors = errors
def __reduce__(self):
return (self.__class__, (self.errors,))
class CouldNotCanonicalize(SSHException):
"""
Raised when hostname canonicalization fails & fallback is disabled.
.. versionadded:: 2.7
"""
pass
class ConfigParseError(SSHException):
"""
A fatal error was encountered trying to parse SSH config data.
Typically this means a config file violated the ``ssh_config``
specification in a manner that requires exiting immediately, such as not
matching ``key = value`` syntax or misusing certain ``Match`` keywords.
.. versionadded:: 2.7
"""
pass
class MessageOrderError(SSHException):
"""
Out-of-order protocol messages were received, violating "strict kex" mode.
.. versionadded:: 3.4
"""
pass

View File

@ -0,0 +1,778 @@
# Copyright (C) 2013-2014 science + computing ag
# Author: Sebastian Deiss <sebastian.deiss@t-online.de>
#
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
This module provides GSS-API / SSPI authentication as defined in :rfc:`4462`.
.. note:: Credential delegation is not supported in server mode.
.. seealso:: :doc:`/api/kex_gss`
.. versionadded:: 1.15
"""
import struct
import os
import sys
#: A boolean constant that indicates if GSS-API / SSPI is available.
GSS_AUTH_AVAILABLE = True
#: A tuple of the exception types used by the underlying GSSAPI implementation.
GSS_EXCEPTIONS = ()
#: :var str _API: Constant for the used API
_API = None
try:
import gssapi
if hasattr(gssapi, "__title__") and gssapi.__title__ == "python-gssapi":
# old, unmaintained python-gssapi package
_API = "MIT" # keep this for compatibility
GSS_EXCEPTIONS = (gssapi.GSSException,)
else:
_API = "PYTHON-GSSAPI-NEW"
GSS_EXCEPTIONS = (
gssapi.exceptions.GeneralError,
gssapi.raw.misc.GSSError,
)
except (ImportError, OSError):
try:
import pywintypes
import sspicon
import sspi
_API = "SSPI"
GSS_EXCEPTIONS = (pywintypes.error,)
except ImportError:
GSS_AUTH_AVAILABLE = False
_API = None
from paramiko.common import MSG_USERAUTH_REQUEST
from paramiko.ssh_exception import SSHException
from paramiko._version import __version_info__
def GSSAuth(auth_method, gss_deleg_creds=True):
"""
Provide SSH2 GSS-API / SSPI authentication.
:param str auth_method: The name of the SSH authentication mechanism
(gssapi-with-mic or gssapi-keyex)
:param bool gss_deleg_creds: Delegate client credentials or not.
We delegate credentials by default.
:return: Either an `._SSH_GSSAPI_OLD` or `._SSH_GSSAPI_NEW` (Unix)
object or an `_SSH_SSPI` (Windows) object
:rtype: object
:raises: ``ImportError`` -- If no GSS-API / SSPI module could be imported.
:see: `RFC 4462 <http://www.ietf.org/rfc/rfc4462.txt>`_
:note: Check for the available API and return either an `._SSH_GSSAPI_OLD`
(MIT GSSAPI using python-gssapi package) object, an
`._SSH_GSSAPI_NEW` (MIT GSSAPI using gssapi package) object
or an `._SSH_SSPI` (MS SSPI) object.
If there is no supported API available, an ``ImportError`` is raised.
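Example (a sketch; the chosen method and username are illustrative)::

    sshgss = GSSAuth("gssapi-with-mic", gss_deleg_creds=False)
    sshgss.set_username("alice")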
"""
if _API == "MIT":
return _SSH_GSSAPI_OLD(auth_method, gss_deleg_creds)
elif _API == "PYTHON-GSSAPI-NEW":
return _SSH_GSSAPI_NEW(auth_method, gss_deleg_creds)
elif _API == "SSPI" and os.name == "nt":
return _SSH_SSPI(auth_method, gss_deleg_creds)
else:
raise ImportError("Unable to import a GSS-API / SSPI module!")
class _SSH_GSSAuth:
"""
Contains the shared variables and methods of `._SSH_GSSAPI_OLD`,
`._SSH_GSSAPI_NEW` and `._SSH_SSPI`.
"""
def __init__(self, auth_method, gss_deleg_creds):
"""
:param str auth_method: The name of the SSH authentication mechanism
(gssapi-with-mic or gssapi-keyex)
:param bool gss_deleg_creds: Delegate client credentials or not
"""
self._auth_method = auth_method
self._gss_deleg_creds = gss_deleg_creds
self._gss_host = None
self._username = None
self._session_id = None
self._service = "ssh-connection"
"""
OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication,
so we also support the krb5 mechanism only.
"""
self._krb5_mech = "1.2.840.113554.1.2.2"
# client mode
self._gss_ctxt = None
self._gss_ctxt_status = False
# server mode
self._gss_srv_ctxt = None
self._gss_srv_ctxt_status = False
self.cc_file = None
def set_service(self, service):
"""
This is just a setter to use a non default service.
I added this method, because RFC 4462 doesn't specify "ssh-connection"
as the only service value.
:param str service: The desired SSH service
"""
if service.find("ssh-"):
self._service = service
def set_username(self, username):
"""
Setter for ``username``. If GSS-API Key Exchange is performed, the
username is not set by ``ssh_init_sec_context``.
:param str username: The name of the user who attempts to login
"""
self._username = username
def ssh_gss_oids(self, mode="client"):
"""
This method returns a single OID, because we only support the
Kerberos V5 mechanism.
:param str mode: Client for client mode and server for server mode
:return: A byte sequence containing the number of supported
OIDs, the length of the OID and the actual OID encoded with
DER
:note: In server mode we just return the OID length and the DER encoded
OID.
"""
from pyasn1.type.univ import ObjectIdentifier
from pyasn1.codec.der import encoder
OIDs = self._make_uint32(1)
krb5_OID = encoder.encode(ObjectIdentifier(self._krb5_mech))
OID_len = self._make_uint32(len(krb5_OID))
if mode == "server":
return OID_len + krb5_OID
return OIDs + OID_len + krb5_OID
def ssh_check_mech(self, desired_mech):
"""
Check if the given OID is the Kerberos V5 OID (server mode).
:param str desired_mech: The desired GSS-API mechanism of the client
:return: ``True`` if the given OID is supported, otherwise ``False``
"""
from pyasn1.codec.der import decoder
mech, __ = decoder.decode(desired_mech)
if mech.__str__() != self._krb5_mech:
return False
return True
# Internals
# -------------------------------------------------------------------------
def _make_uint32(self, integer):
"""
Create a 32-bit unsigned integer (the byte sequence of an integer).
:param int integer: The integer value to convert
:return: The byte sequence of an 32 bit integer
"""
return struct.pack("!I", integer)
def _ssh_build_mic(self, session_id, username, service, auth_method):
"""
Create the SSH2 MIC field for gssapi-with-mic.
:param str session_id: The SSH session ID
:param str username: The name of the user who attempts to login
:param str service: The requested SSH service
:param str auth_method: The requested SSH authentication mechanism
:return: The MIC as defined in RFC 4462. The contents of the
MIC field are:
string session_identifier,
byte SSH_MSG_USERAUTH_REQUEST,
string user-name,
string service (ssh-connection),
string authentication-method
(gssapi-with-mic or gssapi-keyex)
"""
mic = self._make_uint32(len(session_id))
mic += session_id
mic += struct.pack("B", MSG_USERAUTH_REQUEST)
mic += self._make_uint32(len(username))
mic += username.encode()
mic += self._make_uint32(len(service))
mic += service.encode()
mic += self._make_uint32(len(auth_method))
mic += auth_method.encode()
return mic
class _SSH_GSSAPI_OLD(_SSH_GSSAuth):
"""
Implementation of the GSS-API MIT Kerberos Authentication for SSH2,
using the older (unmaintained) python-gssapi package.
:see: `.GSSAuth`
"""
def __init__(self, auth_method, gss_deleg_creds):
"""
:param str auth_method: The name of the SSH authentication mechanism
(gssapi-with-mic or gssapi-keyex)
:param bool gss_deleg_creds: Delegate client credentials or not
"""
_SSH_GSSAuth.__init__(self, auth_method, gss_deleg_creds)
if self._gss_deleg_creds:
self._gss_flags = (
gssapi.C_PROT_READY_FLAG,
gssapi.C_INTEG_FLAG,
gssapi.C_MUTUAL_FLAG,
gssapi.C_DELEG_FLAG,
)
else:
self._gss_flags = (
gssapi.C_PROT_READY_FLAG,
gssapi.C_INTEG_FLAG,
gssapi.C_MUTUAL_FLAG,
)
def ssh_init_sec_context(
self, target, desired_mech=None, username=None, recv_token=None
):
"""
Initialize a GSS-API context.
:param str username: The name of the user who attempts to login
:param str target: The hostname of the target to connect to
:param str desired_mech: The negotiated GSS-API mechanism
("pseudo negotiated" mechanism, because we
support just the krb5 mechanism :-))
:param str recv_token: The GSS-API token received from the Server
:raises:
`.SSHException` -- Is raised if the desired mechanism of the client
is not supported
:return: A ``String`` if the GSS-API has returned a token or
``None`` if no token was returned
"""
from pyasn1.codec.der import decoder
self._username = username
self._gss_host = target
targ_name = gssapi.Name(
"host@" + self._gss_host, gssapi.C_NT_HOSTBASED_SERVICE
)
ctx = gssapi.Context()
ctx.flags = self._gss_flags
if desired_mech is None:
krb5_mech = gssapi.OID.mech_from_string(self._krb5_mech)
else:
mech, __ = decoder.decode(desired_mech)
if mech.__str__() != self._krb5_mech:
raise SSHException("Unsupported mechanism OID.")
else:
krb5_mech = gssapi.OID.mech_from_string(self._krb5_mech)
token = None
try:
if recv_token is None:
self._gss_ctxt = gssapi.InitContext(
peer_name=targ_name,
mech_type=krb5_mech,
req_flags=ctx.flags,
)
token = self._gss_ctxt.step(token)
else:
token = self._gss_ctxt.step(recv_token)
except gssapi.GSSException:
message = "{} Target: {}".format(sys.exc_info()[1], self._gss_host)
raise gssapi.GSSException(message)
self._gss_ctxt_status = self._gss_ctxt.established
return token
def ssh_get_mic(self, session_id, gss_kex=False):
"""
Create the MIC token for a SSH2 message.
:param str session_id: The SSH session ID
:param bool gss_kex: Generate the MIC for GSS-API Key Exchange or not
:return: gssapi-with-mic:
Returns the MIC token from GSS-API for the message we created
with ``_ssh_build_mic``.
gssapi-keyex:
Returns the MIC token from GSS-API with the SSH session ID as
message.
"""
self._session_id = session_id
if not gss_kex:
mic_field = self._ssh_build_mic(
self._session_id,
self._username,
self._service,
self._auth_method,
)
mic_token = self._gss_ctxt.get_mic(mic_field)
else:
# for key exchange with gssapi-keyex
mic_token = self._gss_srv_ctxt.get_mic(self._session_id)
return mic_token
def ssh_accept_sec_context(self, hostname, recv_token, username=None):
"""
Accept a GSS-API context (server mode).
:param str hostname: The server's hostname
:param str username: The name of the user who attempts to login
:param str recv_token: The GSS-API token received from the client,
if it's not the initial call.
:return: A ``String`` if the GSS-API has returned a token or ``None``
if no token was returned
"""
# hostname and username are not required for GSSAPI, but for SSPI
self._gss_host = hostname
self._username = username
if self._gss_srv_ctxt is None:
self._gss_srv_ctxt = gssapi.AcceptContext()
token = self._gss_srv_ctxt.step(recv_token)
self._gss_srv_ctxt_status = self._gss_srv_ctxt.established
return token
def ssh_check_mic(self, mic_token, session_id, username=None):
"""
Verify the MIC token for a SSH2 message.
:param str mic_token: The MIC token received from the client
:param str session_id: The SSH session ID
:param str username: The name of the user who attempts to login
:return: None if the MIC check was successful
:raises: ``gssapi.GSSException`` -- if the MIC check failed
"""
self._session_id = session_id
self._username = username
if self._username is not None:
# server mode
mic_field = self._ssh_build_mic(
self._session_id,
self._username,
self._service,
self._auth_method,
)
self._gss_srv_ctxt.verify_mic(mic_field, mic_token)
else:
# for key exchange with gssapi-keyex
# client mode
self._gss_ctxt.verify_mic(self._session_id, mic_token)
@property
def credentials_delegated(self):
"""
Checks if credentials are delegated (server mode).
:return: ``True`` if credentials are delegated, otherwise ``False``
"""
if self._gss_srv_ctxt.delegated_cred is not None:
return True
return False
def save_client_creds(self, client_token):
"""
Save the Client token in a file. This is used by the SSH server
to store the client credentials if credentials are delegated
(server mode).
:param str client_token: The GSS-API token received from the client
:raises:
``NotImplementedError`` -- Credential delegation is currently not
supported in server mode
"""
raise NotImplementedError
if __version_info__ < (2, 5):
# provide the old name for strict backward compatibility
_SSH_GSSAPI = _SSH_GSSAPI_OLD
class _SSH_GSSAPI_NEW(_SSH_GSSAuth):
"""
Implementation of the GSS-API MIT Kerberos Authentication for SSH2,
using the newer, currently maintained gssapi package.
:see: `.GSSAuth`
"""
def __init__(self, auth_method, gss_deleg_creds):
"""
:param str auth_method: The name of the SSH authentication mechanism
(gssapi-with-mic or gssapi-keyex)
:param bool gss_deleg_creds: Delegate client credentials or not
"""
_SSH_GSSAuth.__init__(self, auth_method, gss_deleg_creds)
if self._gss_deleg_creds:
self._gss_flags = (
gssapi.RequirementFlag.protection_ready,
gssapi.RequirementFlag.integrity,
gssapi.RequirementFlag.mutual_authentication,
gssapi.RequirementFlag.delegate_to_peer,
)
else:
self._gss_flags = (
gssapi.RequirementFlag.protection_ready,
gssapi.RequirementFlag.integrity,
gssapi.RequirementFlag.mutual_authentication,
)
def ssh_init_sec_context(
self, target, desired_mech=None, username=None, recv_token=None
):
"""
Initialize a GSS-API context.
:param str username: The name of the user who attempts to login
:param str target: The hostname of the target to connect to
:param str desired_mech: The negotiated GSS-API mechanism
("pseudo negotiated" mechanism, because we
support just the krb5 mechanism :-))
:param str recv_token: The GSS-API token received from the Server
:raises: `.SSHException` -- Is raised if the desired mechanism of the
client is not supported
:raises: ``gssapi.exceptions.GSSError`` if there is an error signaled
by the GSS-API implementation
:return: A ``String`` if the GSS-API has returned a token or ``None``
if no token was returned
"""
from pyasn1.codec.der import decoder
self._username = username
self._gss_host = target
targ_name = gssapi.Name(
"host@" + self._gss_host,
name_type=gssapi.NameType.hostbased_service,
)
if desired_mech is not None:
mech, __ = decoder.decode(desired_mech)
if mech.__str__() != self._krb5_mech:
raise SSHException("Unsupported mechanism OID.")
krb5_mech = gssapi.MechType.kerberos
token = None
if recv_token is None:
self._gss_ctxt = gssapi.SecurityContext(
name=targ_name,
flags=self._gss_flags,
mech=krb5_mech,
usage="initiate",
)
token = self._gss_ctxt.step(token)
else:
token = self._gss_ctxt.step(recv_token)
self._gss_ctxt_status = self._gss_ctxt.complete
return token
def ssh_get_mic(self, session_id, gss_kex=False):
"""
Create the MIC token for an SSH2 message.
:param str session_id: The SSH session ID
:param bool gss_kex: Generate the MIC for GSS-API Key Exchange or not
:return: gssapi-with-mic:
Returns the MIC token from GSS-API for the message we created
with ``_ssh_build_mic``.
gssapi-keyex:
Returns the MIC token from GSS-API with the SSH session ID as
message.
:rtype: str
"""
self._session_id = session_id
if not gss_kex:
mic_field = self._ssh_build_mic(
self._session_id,
self._username,
self._service,
self._auth_method,
)
mic_token = self._gss_ctxt.get_signature(mic_field)
else:
# for key exchange with gssapi-keyex
mic_token = self._gss_srv_ctxt.get_signature(self._session_id)
return mic_token
def ssh_accept_sec_context(self, hostname, recv_token, username=None):
"""
Accept a GSS-API context (server mode).
:param str hostname: The server's hostname
:param str username: The name of the user who attempts to login
:param str recv_token: The GSS-API token received from the client,
if it's not the initial call.
:return: A ``String`` if the GSS-API has returned a token or ``None``
if no token was returned
"""
# hostname and username are not required for GSSAPI, but they are for SSPI
self._gss_host = hostname
self._username = username
if self._gss_srv_ctxt is None:
self._gss_srv_ctxt = gssapi.SecurityContext(usage="accept")
token = self._gss_srv_ctxt.step(recv_token)
self._gss_srv_ctxt_status = self._gss_srv_ctxt.complete
return token
def ssh_check_mic(self, mic_token, session_id, username=None):
"""
Verify the MIC token for an SSH2 message.
:param str mic_token: The MIC token received from the client
:param str session_id: The SSH session ID
:param str username: The name of the user who attempts to login
:return: None if the MIC check was successful
:raises: ``gssapi.exceptions.GSSError`` -- if the MIC check failed
"""
self._session_id = session_id
self._username = username
if self._username is not None:
# server mode
mic_field = self._ssh_build_mic(
self._session_id,
self._username,
self._service,
self._auth_method,
)
self._gss_srv_ctxt.verify_signature(mic_field, mic_token)
else:
# for key exchange with gssapi-keyex
# client mode
self._gss_ctxt.verify_signature(self._session_id, mic_token)
@property
def credentials_delegated(self):
"""
Checks if credentials are delegated (server mode).
:return: ``True`` if credentials are delegated, otherwise ``False``
:rtype: bool
"""
if self._gss_srv_ctxt.delegated_creds is not None:
return True
return False
def save_client_creds(self, client_token):
"""
Save the Client token in a file. This is used by the SSH server
to store the client credentials if credentials are delegated
(server mode).
:param str client_token: The GSS-API token received from the client
:raises: ``NotImplementedError`` -- Credential delegation is currently
not supported in server mode
"""
raise NotImplementedError
class _SSH_SSPI(_SSH_GSSAuth):
"""
Implementation of the Microsoft SSPI Kerberos Authentication for SSH2.
:see: `.GSSAuth`
"""
def __init__(self, auth_method, gss_deleg_creds):
"""
:param str auth_method: The name of the SSH authentication mechanism
(gssapi-with-mic or gss-keyex)
:param bool gss_deleg_creds: Delegate client credentials or not
"""
_SSH_GSSAuth.__init__(self, auth_method, gss_deleg_creds)
if self._gss_deleg_creds:
self._gss_flags = (
sspicon.ISC_REQ_INTEGRITY
| sspicon.ISC_REQ_MUTUAL_AUTH
| sspicon.ISC_REQ_DELEGATE
)
else:
self._gss_flags = (
sspicon.ISC_REQ_INTEGRITY | sspicon.ISC_REQ_MUTUAL_AUTH
)
def ssh_init_sec_context(
self, target, desired_mech=None, username=None, recv_token=None
):
"""
Initialize an SSPI context.
:param str username: The name of the user who attempts to login
:param str target: The FQDN of the target to connect to
:param str desired_mech: The negotiated SSPI mechanism
("pseudo negotiated" mechanism, because we
support just the krb5 mechanism :-))
:param recv_token: The SSPI token received from the Server
:raises:
`.SSHException` -- Is raised if the desired mechanism of the client
is not supported
:return: A ``String`` if the SSPI has returned a token or ``None`` if
no token was returned
"""
from pyasn1.codec.der import decoder
self._username = username
self._gss_host = target
error = 0
targ_name = "host/" + self._gss_host
if desired_mech is not None:
mech, __ = decoder.decode(desired_mech)
if mech.__str__() != self._krb5_mech:
raise SSHException("Unsupported mechanism OID.")
try:
if recv_token is None:
self._gss_ctxt = sspi.ClientAuth(
"Kerberos", scflags=self._gss_flags, targetspn=targ_name
)
error, token = self._gss_ctxt.authorize(recv_token)
token = token[0].Buffer
except pywintypes.error as e:
e.strerror += ", Target: {}".format(self._gss_host)
raise
if error == 0:
"""
if the status is GSS_COMPLETE (error = 0) the context is fully
established and we can set _gss_ctxt_status to True.
"""
self._gss_ctxt_status = True
token = None
"""
You won't get another token if the context is fully established,
so I set token to None instead of ""
"""
return token
def ssh_get_mic(self, session_id, gss_kex=False):
"""
Create the MIC token for an SSH2 message.
:param str session_id: The SSH session ID
:param bool gss_kex: Generate the MIC for Key Exchange with SSPI or not
:return: gssapi-with-mic:
Returns the MIC token from SSPI for the message we created
with ``_ssh_build_mic``.
gssapi-keyex:
Returns the MIC token from SSPI with the SSH session ID as
message.
"""
self._session_id = session_id
if not gss_kex:
mic_field = self._ssh_build_mic(
self._session_id,
self._username,
self._service,
self._auth_method,
)
mic_token = self._gss_ctxt.sign(mic_field)
else:
# for key exchange with gssapi-keyex
mic_token = self._gss_srv_ctxt.sign(self._session_id)
return mic_token
def ssh_accept_sec_context(self, hostname, username, recv_token):
"""
Accept an SSPI context (server mode).
:param str hostname: The server's FQDN
:param str username: The name of the user who attempts to login
:param str recv_token: The SSPI token received from the client,
if it's not the initial call.
:return: A ``String`` if the SSPI has returned a token or ``None`` if
no token was returned
"""
self._gss_host = hostname
self._username = username
targ_name = "host/" + self._gss_host
self._gss_srv_ctxt = sspi.ServerAuth("Kerberos", spn=targ_name)
error, token = self._gss_srv_ctxt.authorize(recv_token)
token = token[0].Buffer
if error == 0:
self._gss_srv_ctxt_status = True
token = None
return token
def ssh_check_mic(self, mic_token, session_id, username=None):
"""
Verify the MIC token for an SSH2 message.
:param str mic_token: The MIC token received from the client
:param str session_id: The SSH session ID
:param str username: The name of the user who attempts to login
:return: None if the MIC check was successful
:raises: ``sspi.error`` -- if the MIC check failed
"""
self._session_id = session_id
self._username = username
if username is not None:
# server mode
mic_field = self._ssh_build_mic(
self._session_id,
self._username,
self._service,
self._auth_method,
)
# Verifies data and its signature. If verification fails, an
# sspi.error will be raised.
self._gss_srv_ctxt.verify(mic_field, mic_token)
else:
# for key exchange with gssapi-keyex
# client mode
# Verifies data and its signature. If verification fails, an
# sspi.error will be raised.
self._gss_ctxt.verify(self._session_id, mic_token)
@property
def credentials_delegated(self):
"""
Checks if credentials are delegated (server mode).
:return: ``True`` if credentials are delegated, otherwise ``False``
"""
return self._gss_flags & sspicon.ISC_REQ_DELEGATE and (
self._gss_srv_ctxt_status or self._gss_flags
)
def save_client_creds(self, client_token):
"""
Save the Client token in a file. This is used by the SSH server
to store the client credentials if credentials are delegated
(server mode).
:param str client_token: The SSPI token received from the client
:raises:
``NotImplementedError`` -- Credential delegation is currently not
supported in server mode
"""
raise NotImplementedError
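# Illustrative usage (a minimal sketch, not part of the original module): how a
# caller typically drives one of the backends above for gssapi-with-mic.  The
# principal "alice" and host "server.example.org" are assumed values, the
# set_username()/set_service() helpers come from the shared _SSH_GSSAuth base
# (not shown in this hunk), and a working Kerberos environment plus a transport
# that shuttles tokens to and from the server are presumed.
def _example_gssapi_with_mic(session_id, server_reply=None):
    ctx = GSSAuth("gssapi-with-mic", gss_deleg_creds=False)
    ctx.set_username("alice")
    ctx.set_service("ssh-connection")
    # The first leg sends our initial token; later legs feed the server's
    # reply back in via recv_token until the security context is established.
    token = ctx.ssh_init_sec_context(
        "server.example.org", recv_token=server_reply
    )
    # Once established, the MIC over the session id is what goes into the
    # GSSAPI MIC userauth packet.
    return token, ctx.ssh_get_mic(session_id)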

File diff suppressed because it is too large

View File

@ -0,0 +1,337 @@
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Useful functions used by the rest of paramiko.
"""
import sys
import struct
import traceback
import threading
import logging
from paramiko.common import (
DEBUG,
zero_byte,
xffffffff,
max_byte,
byte_ord,
byte_chr,
)
from paramiko.config import SSHConfig
def inflate_long(s, always_positive=False):
"""turns a normalized byte string into a long-int
(adapted from Crypto.Util.number)"""
out = 0
negative = 0
if not always_positive and (len(s) > 0) and (byte_ord(s[0]) >= 0x80):
negative = 1
if len(s) % 4:
filler = zero_byte
if negative:
filler = max_byte
# never convert this to ``s +=`` because this is a string, not a number
# noinspection PyAugmentAssignment
s = filler * (4 - len(s) % 4) + s
for i in range(0, len(s), 4):
out = (out << 32) + struct.unpack(">I", s[i : i + 4])[0]
if negative:
out -= 1 << (8 * len(s))
return out
def deflate_long(n, add_sign_padding=True):
"""turns a long-int into a normalized byte string
(adapted from Crypto.Util.number)"""
# after much testing, this algorithm was deemed to be the fastest
s = bytes()
n = int(n)
while (n != 0) and (n != -1):
s = struct.pack(">I", n & xffffffff) + s
n >>= 32
# strip off leading zeros, FFs
for i in enumerate(s):
if (n == 0) and (i[1] != 0):
break
if (n == -1) and (i[1] != 0xFF):
break
else:
# degenerate case, n was either 0 or -1
i = (0,)
if n == 0:
s = zero_byte
else:
s = max_byte
s = s[i[0] :]
if add_sign_padding:
if (n == 0) and (byte_ord(s[0]) >= 0x80):
s = zero_byte + s
if (n == -1) and (byte_ord(s[0]) < 0x80):
s = max_byte + s
return s
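# A quick round-trip check (illustrative only, not part of the original file):
# deflate_long() emits the two's-complement, big-endian byte string used for
# SSH mpints, and inflate_long() reverses it for positive and negative values.
def _example_long_roundtrip():
    for n in (0, 1, -1, 0x1122334455, -0x80000000):
        assert inflate_long(deflate_long(n)) == n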
def format_binary(data, prefix=""):
x = 0
out = []
while len(data) > x + 16:
out.append(format_binary_line(data[x : x + 16]))
x += 16
if x < len(data):
out.append(format_binary_line(data[x:]))
return [prefix + line for line in out]
def format_binary_line(data):
left = " ".join(["{:02X}".format(byte_ord(c)) for c in data])
right = "".join(
[".{:c}..".format(byte_ord(c))[(byte_ord(c) + 63) // 95] for c in data]
)
return "{:50s} {}".format(left, right)
def safe_string(s):
out = b""
for c in s:
i = byte_ord(c)
if 32 <= i <= 127:
out += byte_chr(i)
else:
out += b("%{:02X}".format(i))
return out
def bit_length(n):
try:
return n.bit_length()
except AttributeError:
norm = deflate_long(n, False)
hbyte = byte_ord(norm[0])
if hbyte == 0:
return 1
bitlen = len(norm) * 8
while not (hbyte & 0x80):
hbyte <<= 1
bitlen -= 1
return bitlen
def tb_strings():
return "".join(traceback.format_exception(*sys.exc_info())).split("\n")
def generate_key_bytes(hash_alg, salt, key, nbytes):
"""
Given a password, passphrase, or other human-source key, scramble it
through a secure hash into some keyworthy bytes. This specific algorithm
is used for encrypting/decrypting private key files.
:param function hash_alg: A function which creates a new hash object, such
as ``hashlib.sha256``.
:param salt: data to salt the hash with.
:type salt: bytes
:param str key: human-entered password or passphrase.
:param int nbytes: number of bytes to generate.
:return: Key data, as `bytes`.
"""
keydata = bytes()
digest = bytes()
if len(salt) > 8:
salt = salt[:8]
while nbytes > 0:
hash_obj = hash_alg()
if len(digest) > 0:
hash_obj.update(digest)
hash_obj.update(b(key))
hash_obj.update(salt)
digest = hash_obj.digest()
size = min(nbytes, len(digest))
keydata += digest[:size]
nbytes -= size
return keydata
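# Minimal usage sketch (the passphrase and salt are made-up values, and
# hashlib.sha256 simply stands in for whatever hash the caller needs): derive
# 32 bytes of key material from a human-entered passphrase.
def _example_generate_key_bytes():
    import hashlib
    key = generate_key_bytes(hashlib.sha256, b"\x00" * 8, "correct horse", 32)
    assert len(key) == 32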
def load_host_keys(filename):
"""
Read a file of known SSH host keys, in the format used by openssh, and
return a compound dict of ``hostname -> keytype ->`` `PKey
<paramiko.pkey.PKey>`. The hostname may be an IP address or DNS name. The
keytype will be either ``"ssh-rsa"`` or ``"ssh-dss"``.
This type of file unfortunately doesn't exist on Windows, but on posix,
it will usually be stored in ``os.path.expanduser("~/.ssh/known_hosts")``.
Since 1.5.3, this is just a wrapper around `.HostKeys`.
:param str filename: name of the file to read host keys from
:return:
nested dict of `.PKey` objects, indexed by hostname and then keytype
"""
from paramiko.hostkeys import HostKeys
return HostKeys(filename)
def parse_ssh_config(file_obj):
"""
Provided only as a backward-compatible wrapper around `.SSHConfig`.
.. deprecated:: 2.7
Use `SSHConfig.from_file` instead.
"""
config = SSHConfig()
config.parse(file_obj)
return config
def lookup_ssh_host_config(hostname, config):
"""
Provided only as a backward-compatible wrapper around `.SSHConfig`.
"""
return config.lookup(hostname)
def mod_inverse(x, m):
# it's crazy how small Python can make this function.
u1, u2, u3 = 1, 0, m
v1, v2, v3 = 0, 1, x
while v3 > 0:
q = u3 // v3
u1, v1 = v1, u1 - v1 * q
u2, v2 = v2, u2 - v2 * q
u3, v3 = v3, u3 - v3 * q
if u2 < 0:
u2 += m
return u2
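# Tiny worked example (illustrative): 3 * 5 == 15 == 2 * 7 + 1, so 5 is the
# inverse of 3 modulo 7.
def _example_mod_inverse():
    assert mod_inverse(3, 7) == 5
    assert (3 * mod_inverse(3, 7)) % 7 == 1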
_g_thread_data = threading.local()
_g_thread_counter = 0
_g_thread_lock = threading.Lock()
def get_thread_id():
global _g_thread_data, _g_thread_counter, _g_thread_lock
try:
return _g_thread_data.id
except AttributeError:
with _g_thread_lock:
_g_thread_counter += 1
_g_thread_data.id = _g_thread_counter
return _g_thread_data.id
def log_to_file(filename, level=DEBUG):
"""send paramiko logs to a logfile,
if they're not already going somewhere"""
logger = logging.getLogger("paramiko")
if len(logger.handlers) > 0:
return
logger.setLevel(level)
f = open(filename, "a")
handler = logging.StreamHandler(f)
frm = "%(levelname)-.3s [%(asctime)s.%(msecs)03d] thr=%(_threadid)-3d"
frm += " %(name)s: %(message)s"
handler.setFormatter(logging.Formatter(frm, "%Y%m%d-%H:%M:%S"))
logger.addHandler(handler)
# make only one filter object, so it doesn't get applied more than once
class PFilter:
def filter(self, record):
record._threadid = get_thread_id()
return True
_pfilter = PFilter()
def get_logger(name):
logger = logging.getLogger(name)
logger.addFilter(_pfilter)
return logger
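# Minimal usage sketch (the path and logger name are assumed values): send
# paramiko's logging to a file, then grab a logger that carries the
# per-thread id the formatter above expects.
def _example_logging(path="/tmp/paramiko-example.log"):
    log_to_file(path)
    log = get_logger("paramiko.example")
    log.debug("example message")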
def constant_time_bytes_eq(a, b):
if len(a) != len(b):
return False
res = 0
# noinspection PyUnresolvedReferences
for i in range(len(a)): # noqa: F821
res |= byte_ord(a[i]) ^ byte_ord(b[i])
return res == 0
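# Illustrative comparison (made-up digests): equality is decided without
# short-circuiting, so timing does not reveal how many leading bytes matched.
def _example_constant_time_compare():
    assert constant_time_bytes_eq(b"secret-mac", b"secret-mac")
    assert not constant_time_bytes_eq(b"secret-mac", b"public-mac")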
class ClosingContextManager:
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def clamp_value(minimum, val, maximum):
return max(minimum, min(val, maximum))
def asbytes(s):
"""
Coerce to bytes if possible or return unchanged.
"""
try:
# Attempt to run through our version of b(), which does the Right Thing
# for unicode strings vs bytestrings, and raises TypeError if it's not
# one of those types.
return b(s)
except TypeError:
try:
# If it wasn't a string/byte/buffer-ish object, try calling an
# asbytes() method, which many of our internal classes implement.
return s.asbytes()
except AttributeError:
# Finally, just do nothing & assume this object is sufficiently
# byte-y or buffer-y that everything will work out (or that callers
# are capable of handling whatever it is.)
return s
# TODO: clean this up / force callers to assume bytes OR unicode
def b(s, encoding="utf8"):
"""cast unicode or bytes to bytes"""
if isinstance(s, bytes):
return s
elif isinstance(s, str):
return s.encode(encoding)
else:
raise TypeError(f"Expected unicode or bytes, got {type(s)}")
# TODO: clean this up / force callers to assume bytes OR unicode
def u(s, encoding="utf8"):
"""cast bytes or unicode to unicode"""
if isinstance(s, bytes):
return s.decode(encoding)
elif isinstance(s, str):
return s
else:
raise TypeError(f"Expected unicode or bytes, got {type(s)}")
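# Round-trip sketch for the text/bytes helpers above (illustrative only).
def _example_text_helpers():
    assert b("héllo") == "héllo".encode("utf8")
    assert u("héllo".encode("utf8")) == "héllo"
    assert asbytes(b"raw") == b"raw"  # bytes pass through unchanged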

View File

@ -0,0 +1,56 @@
# Copyright (C) 2021 Lew Gordon <lew.gordon@genesys.com>
# Copyright (C) 2022 Patrick Spendrin <ps_ml@gmx.de>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os.path
import time
PIPE_NAME = r"\\.\pipe\openssh-ssh-agent"
def can_talk_to_agent():
# use os.listdir() instead of os.path.exists(), because os.path.exists()
# uses the CreateFileW() API and the pipe cannot be reopened unless the server
# calls DisconnectNamedPipe().
dir_, name = os.path.split(PIPE_NAME)
name = name.lower()
return any(name == n.lower() for n in os.listdir(dir_))
class OpenSSHAgentConnection:
def __init__(self):
while True:
try:
self._pipe = os.open(PIPE_NAME, os.O_RDWR | os.O_BINARY)
except OSError as e:
# retry on errno 22, which means that the server has not
# called DisconnectNamedPipe() yet.
if e.errno != 22:
raise
else:
break
time.sleep(0.1)
def send(self, data):
return os.write(self._pipe, data)
def recv(self, n):
return os.read(self._pipe, n)
def close(self):
return os.close(self._pipe)
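# Windows-only usage sketch (illustrative; assumes the OpenSSH agent pipe is
# available and that a single read returns the whole reply): ask the agent for
# its identities with SSH_AGENTC_REQUEST_IDENTITIES (message number 11) and
# read back the length-prefixed response.
def _example_request_identities():
    import struct

    if not can_talk_to_agent():
        return None
    conn = OpenSSHAgentConnection()
    try:
        conn.send(struct.pack(">I", 1) + bytes([11]))  # length prefix + msg 11
        length = struct.unpack(">I", conn.recv(4))[0]
        return conn.recv(length)
    finally:
        conn.close()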

View File

@ -0,0 +1,138 @@
# Copyright (C) 2005 John Arbash-Meinel <john@arbash-meinel.com>
# Modified up by: Todd Whiteman <ToddW@ActiveState.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Functions for communicating with Pageant, the basic windows ssh agent program.
"""
import array
import ctypes.wintypes
import platform
import struct
from paramiko.common import zero_byte
from paramiko.util import b
import _thread as thread
from . import _winapi
_AGENT_COPYDATA_ID = 0x804E50BA
_AGENT_MAX_MSGLEN = 8192
# Note: The WM_COPYDATA value is pulled from win32con, as a workaround
# so we do not have to import this huge library just for this one variable.
win32con_WM_COPYDATA = 74
def _get_pageant_window_object():
return ctypes.windll.user32.FindWindowA(b"Pageant", b"Pageant")
def can_talk_to_agent():
"""
Check to see if there is a "Pageant" agent we can talk to.
This checks both if we have the required libraries (win32all or ctypes)
and if there is a Pageant currently running.
"""
return bool(_get_pageant_window_object())
if platform.architecture()[0] == "64bit":
ULONG_PTR = ctypes.c_uint64
else:
ULONG_PTR = ctypes.c_uint32
class COPYDATASTRUCT(ctypes.Structure):
"""
ctypes implementation of
http://msdn.microsoft.com/en-us/library/windows/desktop/ms649010%28v=vs.85%29.aspx
"""
_fields_ = [
("num_data", ULONG_PTR),
("data_size", ctypes.wintypes.DWORD),
("data_loc", ctypes.c_void_p),
]
def _query_pageant(msg):
"""
Communication with the Pageant process is done through a shared
memory-mapped file.
"""
hwnd = _get_pageant_window_object()
if not hwnd:
# Pageant isn't running (anymore), so there is nothing to query; bail out.
return None
# create a name for the mmap
map_name = f"PageantRequest{thread.get_ident():08x}"
pymap = _winapi.MemoryMap(
map_name, _AGENT_MAX_MSGLEN, _winapi.get_security_attributes_for_user()
)
with pymap:
pymap.write(msg)
# Create an array buffer containing the mapped filename
char_buffer = array.array("b", b(map_name) + zero_byte) # noqa
char_buffer_address, char_buffer_size = char_buffer.buffer_info()
# Create a COPYDATASTRUCT to pass to the SendMessage call
cds = COPYDATASTRUCT(
_AGENT_COPYDATA_ID, char_buffer_size, char_buffer_address
)
response = ctypes.windll.user32.SendMessageA(
hwnd, win32con_WM_COPYDATA, ctypes.sizeof(cds), ctypes.byref(cds)
)
if response > 0:
pymap.seek(0)
datalen = pymap.read(4)
retlen = struct.unpack(">I", datalen)[0]
return datalen + pymap.read(retlen)
return None
class PageantConnection:
"""
Mock "connection" to an agent which roughly approximates the behavior of
a unix local-domain socket (as used by Agent). Requests are sent to the
pageant daemon via special Windows magick, and responses are buffered back
for subsequent reads.
"""
def __init__(self):
self._response = None
def send(self, data):
self._response = _query_pageant(data)
def recv(self, n):
if self._response is None:
return ""
ret = self._response[:n]
self._response = self._response[n:]
if self._response == "":
self._response = None
return ret
def close(self):
pass
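# Windows-only usage sketch (illustrative; assumes Pageant is running): the
# same identities request an OpenSSH agent would get, here routed through
# Pageant's WM_COPYDATA / shared-memory channel.
def _example_query_identities():
    if not can_talk_to_agent():
        return None
    conn = PageantConnection()
    try:
        conn.send(struct.pack(">I", 1) + bytes([11]))  # length prefix + msg 11
        length = struct.unpack(">I", conn.recv(4))[0]
        return conn.recv(length)
    finally:
        conn.close()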