Explanation: Here we define a metaclass that automatically registers every subclass in a global registry the moment its class statement executes. This enables dynamic lookup by name and plugin architectures without hard-coding imports of each plugin at the lookup site (the plugin modules still have to be imported somewhere so their class statements run).
class RegistryMeta(type):
    _registry = {}

    def __new__(mcs, name, bases, attrs):
        cls = super().__new__(mcs, name, bases, attrs)
        if name != 'BasePlugin':          # don't register the abstract base itself
            mcs._registry[name] = cls
        return cls

    @classmethod
    def get_plugin(mcs, name):
        return mcs._registry[name]

class BasePlugin(metaclass=RegistryMeta):
    def run(self, *args, **kwargs):
        raise NotImplementedError

# Example subclass
class MyPlugin(BasePlugin):
    def run(self, data):
        return data[::-1]

# Usage
PluginClass = RegistryMeta.get_plugin('MyPlugin')
plugin = PluginClass()
print(plugin.run('hello world'))  # dlrow olleh
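Because registration is a side effect of class creation, the registry can also drive dispatch across every plugin. A minimal sketch, with a second hypothetical plugin (UpperPlugin is not part of the original example):

class UpperPlugin(BasePlugin):
    def run(self, data):
        return data.upper()

# Every registered plugin can be instantiated and dispatched by name.
for name, cls in RegistryMeta._registry.items():
    print(name, '->', cls().run('hello world'))
# MyPlugin -> dlrow olleh
# UpperPlugin -> HELLO WORLD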
Explanation: We read a function’s bytecode, prepend opcodes that inject a logging call at entry, rebuild the code object with `CodeType.replace()`, and swap it onto the function. This demands a detailed understanding of CPython’s bytecode format, which changes between minor versions; the example below targets CPython 3.10 and earlier.
import dis, sys

def trace_entry(func):
    # Prepend a print() call to a function's bytecode. This targets CPython 3.10
    # and earlier (2-byte opcodes, CALL_FUNCTION, no inline caches) and assumes
    # the body has no absolute jumps, whose targets the prologue would shift.
    # Line numbers in tracebacks will be slightly off for the injected bytes.
    assert sys.version_info < (3, 11), "bytecode format changed in 3.11"
    co = func.__code__
    names = co.co_names + ('print',)
    consts = co.co_consts + (f'Entering {func.__name__}',)
    prologue = bytes([
        dis.opmap['LOAD_GLOBAL'], len(names) - 1,    # push the print builtin
        dis.opmap['LOAD_CONST'], len(consts) - 1,    # push the message
        dis.opmap['CALL_FUNCTION'], 1,               # call print(message)
        dis.opmap['POP_TOP'], 0,                     # discard the None result
    ])
    func.__code__ = co.replace(
        co_code=prologue + co.co_code,
        co_names=names,
        co_consts=consts,
        co_stacksize=co.co_stacksize + 2,            # room for the two extra pushes
    )
    return func

@trace_entry
def complex_calc(x, y):
    return x * y + (x - y)

print(complex_calc(10, 5))  # prints "Entering complex_calc", then 55
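To verify that the prologue actually landed in the patched code object, point `dis` at the decorated function; on the CPython versions this sketch targets, the first four instructions should be the injected LOAD_GLOBAL / LOAD_CONST / CALL_FUNCTION / POP_TOP sequence:

import dis
dis.dis(complex_calc)   # the injected prologue appears before the original body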
Explanation: This shows the boilerplate for a C extension exposing a high‑performance `matrix_multiply` to Python. It involves parsing buffer arguments, managing Py_buffer/PyObject references, and releasing the GIL around the pure‑C inner loops so other threads can run.
// matrixmodule.c
#include <Python.h>
#include <math.h>

static PyObject* matmul(PyObject* self, PyObject* args) {
    Py_buffer A, B;
    if (!PyArg_ParseTuple(args, "y*y*", &A, &B)) return NULL;
    // Assume both buffers hold square N x N matrices of C doubles.
    double* a = (double*)A.buf;
    double* b = (double*)B.buf;
    int N = (int)sqrt((double)(A.len / sizeof(double)));
    double* c = (double*)calloc((size_t)N * N, sizeof(double));
    Py_BEGIN_ALLOW_THREADS             // release the GIL around the pure-C loops
    for (int i = 0; i < N; i++)
        for (int k = 0; k < N; k++)    // i-k-j order keeps the inner loop cache-friendly
            for (int j = 0; j < N; j++)
                c[i*N + j] += a[i*N + k] * b[k*N + j];
    Py_END_ALLOW_THREADS
    PyObject* result = PyBytes_FromStringAndSize((const char*)c, (Py_ssize_t)A.len);
    free(c);
    PyBuffer_Release(&A);
    PyBuffer_Release(&B);
    return result;
}

static PyMethodDef MatrixMethods[] = {
    {"matrix_multiply", matmul, METH_VARARGS, "Multiply two square matrices of doubles."},
    {NULL, NULL, 0, NULL}
};

static struct PyModuleDef matrixmodule = {PyModuleDef_HEAD_INIT, "matrix", NULL, -1, MatrixMethods};

PyMODINIT_FUNC PyInit_matrix(void) { return PyModule_Create(&matrixmodule); }
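Assuming the extension above has been compiled and installed under the module name `matrix` (both the module and function names come from the sketch, not an existing package), it can be exercised from Python by packing doubles into bytes:

import struct
from matrix import matrix_multiply   # hypothetical compiled extension from the sketch above

A = struct.pack('4d', 1.0, 2.0, 3.0, 4.0)   # [[1, 2], [3, 4]], row-major
B = struct.pack('4d', 5.0, 6.0, 7.0, 8.0)   # [[5, 6], [7, 8]]
C = matrix_multiply(A, B)
print(struct.unpack('4d', C))               # (19.0, 22.0, 43.0, 50.0)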
Explanation: Subclass the default selector event loop to layer a custom scheduling strategy (a priority queue of delayed callbacks) on top of its built-in timers, draining whatever is due without blocking the loop.
import asyncio
import heapq
import itertools
import time

class PriorityEventLoop(asyncio.SelectorEventLoop):
    """Selector loop that drains its own priority queue of delayed callbacks."""
    def __init__(self):
        super().__init__()
        self._pq = []
        self._counter = itertools.count()   # tie-breaker so callbacks are never compared

    def call_later(self, delay, callback, *args, context=None):
        when = self.time() + delay
        heapq.heappush(self._pq, (when, next(self._counter), callback, args))
        # Let the base loop wake us at the right moment to drain the queue.
        return super().call_later(delay, self._drain_pq, context=context)

    def _drain_pq(self):
        now = self.time()
        while self._pq and self._pq[0][0] <= now:
            _, _, cb, args = heapq.heappop(self._pq)
            cb(*args)

async def high_priority():
    print("High priority at", time.time())

async def main():
    loop = asyncio.get_running_loop()
    loop.call_later(0.02, lambda: asyncio.ensure_future(high_priority()))
    loop.call_later(0.1, lambda: print("Normal priority"))
    loop.call_later(0.05, lambda: print("Lower delay priority"))
    await asyncio.sleep(0.2)

# asyncio.run() would build its own loop, so drive the custom loop directly.
loop = PriorityEventLoop()
asyncio.set_event_loop(loop)
try:
    loop.run_until_complete(main())
finally:
    loop.close()
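If you would rather let asyncio.run() create the custom loop itself, one option is a small event loop policy; PriorityEventLoopPolicy below is an illustrative sketch, not part of the original example:

class PriorityEventLoopPolicy(asyncio.DefaultEventLoopPolicy):
    def new_event_loop(self):
        return PriorityEventLoop()          # asyncio.run() asks the policy for new loops

asyncio.set_event_loop_policy(PriorityEventLoopPolicy())
asyncio.run(main())                         # now runs on a PriorityEventLoop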
Explanation: We force a full GC, inspect object refcounts, then use `ctypes` to manipulate an object’s internals (e.g., turn a tuple into a list in-place). This is extremely unsafe and for experimental use only.
import gc, ctypes
from ctypes import pythonapi, py_object
# Force a full collection across all generations
gc.collect()

# ob_refcnt is the first field of every PyObject, so a raw read of the
# machine word at id(obj) gives the refcount on a standard CPython build.
t = ('a', 'b', 'c')
print(ctypes.c_ssize_t.from_address(id(t)).value)

# How many tuples does the GC currently track?
print(sum(1 for obj in gc.get_objects() if isinstance(obj, tuple)))

# Overwrite ob_type, which sits right after ob_refcnt (64-bit, non-debug build).
# Only patch an object we created ourselves; mutating a random interpreter
# tuple taken from gc.get_objects() would corrupt the process almost immediately.
pythonapi.Py_IncRef(py_object(list))              # keep the swapped-in type's refcount honest
ob_type_offset = ctypes.sizeof(ctypes.c_ssize_t)  # skip past ob_refcnt
ctypes.c_void_p.from_address(id(t) + ob_type_offset).value = id(list)
print(isinstance(t, list), type(t))               # True <class 'list'>
# t is now a lie: its memory is still laid out as a tuple, so using it as a
# list (or letting it be deallocated) can crash the interpreter.
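The hand-computed offset can be made explicit with a `ctypes.Structure` mirroring the head of `PyObject`; this sketch assumes a standard non-debug 64-bit CPython build (the header layout differs on debug and free-threaded builds):

import ctypes

class RawPyObject(ctypes.Structure):
    # Mirrors CPython's object header: Py_ssize_t ob_refcnt; PyTypeObject *ob_type;
    _fields_ = [('ob_refcnt', ctypes.c_ssize_t),
                ('ob_type', ctypes.c_void_p)]

x = ('just', 'an', 'example')
raw = RawPyObject.from_address(id(x))
print(raw.ob_refcnt, raw.ob_type == id(tuple))   # refcount of x, True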
Explanation: Map a large file into memory and work on slices via `memoryview` without copying data. Useful for high‑performance parsing or binary protocol manipulation.
import mmap, struct
with open('large.bin', 'r+b') as f:
    mm = mmap.mmap(f.fileno(), 0)       # map the whole file, writable
    view = memoryview(mm)
    # Read 1000 big-endian 32-bit integers without copying the underlying buffer
    ints = struct.unpack_from('!1000I', view, offset=0)
    # Modify the first integer in place
    struct.pack_into('!I', view, 0, ints[0] ^ 0xDEADBEEF)
    view.release()                      # release the exported buffer, or mm.close() raises BufferError
    mm.flush()
    mm.close()
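The same zero-copy approach extends to record-oriented parsing with `struct.Struct.iter_unpack`; the record layout below is made up for illustration and works identically over a `memoryview` of an `mmap`:

import struct

record = struct.Struct('!Id')            # hypothetical record: u32 id + big-endian double
buf = bytearray(record.size * 3)
for i in range(3):
    record.pack_into(buf, i * record.size, i, i * 1.5)

view = memoryview(buf)
for rec_id, value in record.iter_unpack(view):   # iterates records without copying buf
    print(rec_id, value)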