import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
def spring_mass_damper(state, t, m, k, c):
    """
    Spring-mass-damper system.

    Second-order ODE: m*d²x/dt² + c*dx/dt + k*x = 0
    Converted to a system of first-order ODEs:
        dx/dt = v
        dv/dt = -(k/m)x - (c/m)v
    """
    x, v = state
    dxdt = v
    dvdt = -(k/m) * x - (c/m) * v
    return [dxdt, dvdt]
# System parameters
m = 1.0   # mass (kg)
k = 10.0  # spring constant (N/m)
c = 0.5   # damping coefficient (Ns/m)
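The integration step that produces the position and velocity arrays used below is not shown in this excerpt. A minimal sketch of that step, assuming an initial displacement of 1 m, zero initial velocity, and a 10 s simulation window (all illustrative values):

# Assumed initial conditions and time grid (illustrative values)
state0 = [1.0, 0.0]           # initial position (m), initial velocity (m/s)
t = np.linspace(0, 10, 1000)  # time points (s)

# Integrate the system and unpack position and velocity
solution = odeint(spring_mass_damper, state0, t, args=(m, k, c))
x = solution[:, 0]
v = solution[:, 1]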
# Calculate energy
potential_energy = 0.5 * k * x**2
kinetic_energy = 0.5 * m * v**2
total_energy = potential_energy + kinetic_energy
# Create plots
plt.figure(figsize=(15, 10))
# Position vs Time
plt.subplot(2, 2, 1)
plt.plot(t, x, 'b-', label='Position')
plt.grid(True)
plt.xlabel('Time (s)')
plt.ylabel('Position (m)')
plt.title('Position vs Time')
plt.legend()
# Velocity vs Time
plt.subplot(2, 2, 2)
plt.plot(t, v, 'r-', label='Velocity')
plt.grid(True)
plt.xlabel('Time (s)')
plt.ylabel('Velocity (m/s)')
plt.title('Velocity vs Time')
plt.legend()
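The energy quantities computed earlier are not plotted in the code shown here. A sketch of a third panel that would display them, assuming the 2×2 subplot grid continues (the panel layout and styling are hypothetical):

# Energy vs Time (assumed third panel of the 2x2 grid)
plt.subplot(2, 2, 3)
plt.plot(t, potential_energy, 'g-', label='Potential Energy')
plt.plot(t, kinetic_energy, 'r-', label='Kinetic Energy')
plt.plot(t, total_energy, 'k--', label='Total Energy')
plt.grid(True)
plt.xlabel('Time (s)')
plt.ylabel('Energy (J)')
plt.title('Energy vs Time')
plt.legend()

plt.tight_layout()
plt.show()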
A Python-Based Study of Pipe Flow and Pressure Drop Analysis
I’ll create an example of fluid dynamics, this time focusing on pipe flow analysis using the Hagen-Poiseuille equation and pressure drop calculations.
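For reference, the Hagen-Poiseuille relation gives the pressure drop for fully developed laminar flow in a circular pipe of radius $R$ and length $L$ carrying volumetric flow rate $Q$ of a fluid with dynamic viscosity $\mu$:

$$ \Delta p = \frac{8 \mu L Q}{\pi R^4} $$

The code below uses the more general Darcy-Weisbach equation; with the laminar friction factor $f = 64/Re$ it reduces to exactly this result.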
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
class PipeFlowAnalyzer:
    def __init__(self):
        # Fluid properties (water at 20°C)
        self.density = 998.2      # kg/m³
        self.viscosity = 0.001    # Pa·s

        # Pipe properties
        self.length = 10.0        # m
        self.diameter = 0.05      # m
        self.roughness = 0.0015   # mm (typical for drawn tubing; commercial steel is closer to 0.045 mm)

    def reynolds_number(self, velocity):
        """Calculate Reynolds number"""
        return self.density * velocity * self.diameter / self.viscosity

    def friction_factor(self, reynolds):
        """Calculate Darcy friction factor using the Colebrook-White equation"""
        def colebrook(f):
            return (1/np.sqrt(f) + 2.0*np.log10(self.roughness/(3.7*self.diameter*1000)
                                                + 2.51/(reynolds*np.sqrt(f))))

        if reynolds < 2300:
            return 64/reynolds
        else:
            # Initial guess using Swamee-Jain equation
            f_0 = 0.25/(np.log10(self.roughness/(3.7*self.diameter*1000)
                                 + 5.74/reynolds**0.9)**2)
            return fsolve(colebrook, f_0)[0]

    def pressure_drop(self, velocity):
        """Calculate pressure drop using the Darcy-Weisbach equation"""
        re = self.reynolds_number(velocity)
        f = self.friction_factor(re)
        return (f * self.length * self.density * velocity**2) / (2 * self.diameter)

    def velocity_profile(self, flow_rate, r_points=50):
        """Calculate velocity profile for laminar flow"""
        radius = self.diameter/2
        r = np.linspace(0, radius, r_points)
        # Maximum velocity at center
        v_max = 2 * flow_rate / (np.pi * radius**2)
        # Parabolic profile
        v = v_max * (1 - (r/radius)**2)
        return r, v

    def analyze_flow_range(self, velocities):
        """Analyze flow characteristics over a range of velocities"""
        reynolds = [self.reynolds_number(v) for v in velocities]
        pressure_drops = [self.pressure_drop(v) for v in velocities]
        friction_factors = [self.friction_factor(re) for re in reynolds]
        return reynolds, pressure_drops, friction_factors

    def plot_analysis(self, velocities):
        """Create visualizations of flow analysis"""
        reynolds, pressure_drops, friction_factors = self.analyze_flow_range(velocities)

        fig = plt.figure(figsize=(15, 10))

        # Pressure drop vs Velocity
        ax1 = fig.add_subplot(221)
        ax1.plot(velocities, pressure_drops, 'b-', linewidth=2)
        ax1.set_xlabel('Flow Velocity (m/s)')
        ax1.set_ylabel('Pressure Drop (Pa)')
        ax1.set_title('Pressure Drop vs Flow Velocity')
        ax1.grid(True)

        # Friction factor vs Reynolds number
        ax2 = fig.add_subplot(222)
        ax2.semilogx(reynolds, friction_factors, 'r-', linewidth=2)
        ax2.set_xlabel('Reynolds Number')
        ax2.set_ylabel('Friction Factor')
        ax2.set_title('Moody Diagram')
        ax2.grid(True)

        # Velocity profile for laminar flow
        ax3 = fig.add_subplot(223)
        flow_rate = 0.001  # m³/s
        r, v = self.velocity_profile(flow_rate)
        ax3.plot(r*1000, v, 'g-', linewidth=2)  # Convert radius to mm
        ax3.set_xlabel('Radial Position (mm)')
        ax3.set_ylabel('Velocity (m/s)')
        ax3.set_title('Laminar Flow Velocity Profile')
        ax3.grid(True)

        # Head loss vs Flow rate
        ax4 = fig.add_subplot(224)
        head_losses = [dp/(self.density * 9.81) for dp in pressure_drops]
        flow_rates = [v * np.pi * (self.diameter/2)**2 for v in velocities]
        ax4.plot(flow_rates, head_losses, 'm-', linewidth=2)
        ax4.set_xlabel('Flow Rate (m³/s)')
        ax4.set_ylabel('Head Loss (m)')
        ax4.set_title('Head Loss vs Flow Rate')
        ax4.grid(True)

        plt.tight_layout()
        return fig
# Create analyzer and run analysis
analyzer = PipeFlowAnalyzer()
# Define velocity range for analysis
velocities = np.linspace(0.1, 5, 100)
# Plot results
analyzer.plot_analysis(velocities)
# Print analysis for a specific velocity
test_velocity = 1.0  # m/s
re = analyzer.reynolds_number(test_velocity)
dp = analyzer.pressure_drop(test_velocity)
f = analyzer.friction_factor(re)

print(f"At {test_velocity} m/s: Re = {re:.0f}, friction factor = {f:.4f}, pressure drop = {dp:.1f} Pa")
We will simulate how opinions spread in a social network.
The idea is based on the DeGroot model of consensus, where each node in the network updates its opinion based on a weighted average of its neighbors’ opinions.
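In matrix form, writing the opinions as a vector $x(t)$ and the influence weights as a row-stochastic matrix $W$ (each row sums to 1), one update step is:

$$ x(t+1) = W \, x(t) $$

which is exactly the opinions = weights @ opinions update used in the code below.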
Problem Setup
A network of individuals is represented as a graph.
Each node has an initial opinion (randomized).
Opinions are updated iteratively using the weighted average of neighboring opinions.
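The setup code (graph construction, weight matrix, and initial opinions) is not included in this excerpt. A minimal sketch of those steps, assuming a networkx random graph and a row-normalized adjacency matrix with self-loops; all parameter values are illustrative:

import numpy as np
import networkx as nx
import matplotlib.pyplot as plt

# Step 1: Build a random social network (illustrative parameters)
num_nodes = 20
G = nx.erdos_renyi_graph(num_nodes, 0.3, seed=42)

# Step 2: Build a row-stochastic weight matrix (self-loops keep some weight on a node's own opinion)
adjacency = nx.to_numpy_array(G) + np.eye(num_nodes)
weights = adjacency / adjacency.sum(axis=1, keepdims=True)

# Step 3: Random initial opinions in [0, 1]
rng = np.random.default_rng(0)
opinions = rng.random(num_nodes)

# Step 4: Track opinion evolution over the iterations
iterations = 50
opinion_history = [opinions.copy()]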
for _ in range(iterations):
    opinions = weights @ opinions  # Weighted average of neighbors
    opinion_history.append(opinions.copy())
# Step 5: Visualization of the results
plt.figure(figsize=(12, 6))
# Plot the convergence of opinions over time
for i, opinion_track in enumerate(np.array(opinion_history).T):
    plt.plot(opinion_track, label=f'Node {i}', alpha=0.6)
plt.title("Opinion Dynamics in a Social Network (DeGroot Model)") plt.xlabel("Iteration") plt.ylabel("Opinion Value") plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize='small') plt.grid(True) plt.show()
# Plot the initial and final opinions on the graph
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
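The drawing code for these two panels is not included in the excerpt. A sketch of how the initial and final opinions could be drawn on the graph, assuming the networkx graph G and the opinion_history list from the setup above:

pos = nx.spring_layout(G, seed=42)  # fixed layout so both panels are comparable

for ax, data, title in [(axes[0], opinion_history[0], "Initial Opinions"),
                        (axes[1], opinion_history[-1], "Final Opinions")]:
    nodes = nx.draw_networkx_nodes(G, pos, ax=ax, node_color=data,
                                   cmap=plt.cm.viridis, vmin=0, vmax=1)
    nx.draw_networkx_edges(G, pos, ax=ax, alpha=0.3)
    ax.set_title(title)
    ax.axis('off')
    fig.colorbar(nodes, ax=ax, label="Opinion Value")

plt.show()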
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
# Define the Lotka-Volterra equations
def lotka_volterra(t, z, alpha, beta, delta, gamma):
    x, y = z
    dxdt = alpha * x - beta * x * y
    dydt = delta * x * y - gamma * y
    return [dxdt, dydt]
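The excerpt stops before the equations are integrated. A minimal sketch that solves and plots the system with solve_ivp, using illustrative parameter values and initial populations:

# Illustrative parameters: prey growth, predation, predator growth, predator death
alpha, beta, delta, gamma = 1.0, 0.1, 0.075, 1.5
z0 = [10, 5]                 # assumed initial prey and predator populations
t_span = (0, 20)
t_eval = np.linspace(*t_span, 500)

sol = solve_ivp(lotka_volterra, t_span, z0, args=(alpha, beta, delta, gamma),
                t_eval=t_eval)

plt.plot(sol.t, sol.y[0], label='Prey')
plt.plot(sol.t, sol.y[1], label='Predator')
plt.xlabel('Time')
plt.ylabel('Population')
plt.title('Lotka-Volterra Predator-Prey Dynamics')
plt.legend()
plt.grid(True)
plt.show()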
Example: Modeling Population Growth with Logistic Growth Equation
In Environmental Mathematics, one of the fundamental models used to describe the growth of populations, such as animal populations or human populations, is the Logistic Growth Model.
This model accounts for the carrying capacity of the environment, which limits indefinite exponential growth.
Logistic Growth Equation
The Logistic Growth Model is represented by the following differential equation:
$$ \frac{dP}{dt} = rP \left(1 - \frac{P}{K}\right) $$
$P(t)$ = Population size at time $t$
$r$ = Intrinsic growth rate (how quickly the population grows)
$K$ = Carrying capacity (maximum population size that the environment can support)
$t$ = Time
$P(0)$ = Initial population size at $t = 0$
The solution to this equation models the population growth over time and shows an S-shaped curve, where the population grows exponentially when small, then slows down as it approaches the carrying capacity $K$.
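This differential equation has a well-known closed-form solution, which is exactly what the code below implements:

$$ P(t) = \frac{K}{1 + \frac{K - P_0}{P_0} e^{-rt}} $$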
Problem
We will use the Logistic Growth Model to model the population growth of a species over time.
We will visualize how the population changes as a function of time.
We will explore the effect of different intrinsic growth rates and carrying capacities on the population size.
Steps
Define the logistic growth equation and its solution.
Simulate the population growth using Python.
Plot the population size over time for different parameters.
import numpy as np
import matplotlib.pyplot as plt
# Logistic growth model function
def logistic_growth(t, P0, r, K):
    """
    Computes the population at time t based on the logistic growth model.

    Parameters:
        t: time (array or scalar)
        P0: initial population size
        r: intrinsic growth rate
        K: carrying capacity

    Returns:
        Population size at time t
    """
    return K / (1 + (K - P0) / P0 * np.exp(-r * t))
# Parameters
P0 = 10   # Initial population size
r = 0.5   # Intrinsic growth rate
K = 1000  # Carrying capacity
t = np.linspace(0, 20, 200)  # Time from 0 to 20 with 200 points
# Compute population over time
population = logistic_growth(t, P0, r, K)
# Plot population vs time
plt.figure(figsize=(8, 6))
plt.plot(t, population, label=f'P0={P0}, r={r}, K={K}')

# Labels and title
plt.title('Logistic Growth Model: Population vs Time')
plt.xlabel('Time (t)')
plt.ylabel('Population (P)')
plt.grid(True)
plt.legend()
plt.show()
# Effect of different intrinsic growth rates (r) on population growth
r_values = [0.1, 0.3, 0.5, 0.7]
plt.figure(figsize=(8, 6))
for r_val in r_values:  # separate loop variable so the baseline r is not overwritten
    population_r = logistic_growth(t, P0, r_val, K)
    plt.plot(t, population_r, label=f'r = {r_val}')
plt.title('Effect of Intrinsic Growth Rate on Logistic Growth')
plt.xlabel('Time (t)')
plt.ylabel('Population (P)')
plt.legend()
plt.grid(True)
plt.show()
# Effect of different carrying capacities (K) on population growth
K_values = [500, 1000, 1500, 2000]
plt.figure(figsize=(8, 6))
for K_val in K_values:
    population_K = logistic_growth(t, P0, r, K_val)
    plt.plot(t, population_K, label=f'K = {K_val}')
plt.title('Effect of Carrying Capacity on Logistic Growth')
plt.xlabel('Time (t)')
plt.ylabel('Population (P)')
plt.legend()
plt.grid(True)
plt.show()
Explanation of the Code
Logistic Growth Function:
The function logistic_growth(t, P0, r, K) computes the population at time $t$ using the logistic growth equation. It takes:
t: Time (which can be an array for continuous simulation)
P0: The initial population size at time $t = 0$
r: The intrinsic growth rate of the population
K: The carrying capacity (maximum population the environment can support)
The function calculates the population at time $t$ using the formula for logistic growth.
Parameters:
$P_0 = 10$: Initial population size.
$r = 0.5$: Intrinsic growth rate (the speed of population growth).
$K = 1000$: Carrying capacity (the environment can support up to 1000 individuals).
t = np.linspace(0, 20, 200): We simulate the population over 20 time units, with 200 points to generate a smooth curve.
Plotting the Results:
The first plot visualizes the population growth over time using the logistic model.
The second plot shows how different intrinsic growth rates (r = 0.1, 0.3, 0.5, 0.7) affect the population growth. A higher growth rate results in faster population growth.
The third plot shows how varying the carrying capacity (K = 500, 1000, 1500, 2000) influences the population. A higher carrying capacity allows the population to grow to a higher final size.
Expected Outcome
Logistic Growth Curve: The first plot will show the characteristic S-shaped curve. The population starts growing exponentially, but as it approaches the carrying capacity $K$, it slows down and levels off.
Effect of Intrinsic Growth Rate: In the second plot, as the intrinsic growth rate $r$ increases, the population grows faster at the beginning, but it ultimately approaches the same carrying capacity in each case. The time needed to approach the carrying capacity is shorter for higher growth rates.
Effect of Carrying Capacity: In the third plot, increasing the carrying capacity $K$ leads to a higher maximum population. However, the growth curve still follows the same logistic pattern; only the “ceiling” is higher.
Result Analysis
The S-shaped curve is a key feature of the logistic growth model, showing how the population grows rapidly at first, but as resources become limited, the growth slows and eventually stabilizes near the carrying capacity.
The intrinsic growth rate $r$ affects the speed of growth. A higher growth rate leads to a quicker approach to the carrying capacity.
The carrying capacity $K$ defines the maximum population that the environment can support. If $K$ is increased, the population can grow to a higher value before stabilizing.
Conclusion
This example demonstrates how Environmental Mathematics uses mathematical models like logistic growth to understand population dynamics in a given environment.
By adjusting parameters such as the intrinsic growth rate and carrying capacity, we can predict how populations will evolve over time and make informed decisions for environmental management, conservation, and resource allocation.
The Tunneling Effect in physics refers to the phenomenon where a particle has a probability of crossing a potential barrier, even if its energy is lower than the height of the barrier.
This effect is most commonly discussed in quantum mechanics and has significant applications in areas like nuclear fusion, semiconductor physics, and scanning tunneling microscopes.
One way to visualize this phenomenon is to consider a quantum particle in a potential well with a barrier.
Classically, if the particle’s energy is lower than the barrier height, it should not be able to pass through.
However, quantum mechanically, there is a finite probability for the particle to “tunnel” through the barrier.
Example Problem
We will model a particle in a one-dimensional potential well with a rectangular potential barrier.
The wavefunction of the particle is described by the Schrödinger equation, and the probability of tunneling can be computed using a simplified approach.
Potential setup: A rectangular potential barrier with height $V_0$ and width $L$.
Particle energy: $E$, where $E < V_0$, so the particle does not have enough energy to overcome the barrier classically.
We can compute the transmission probability $T(E)$, which gives the likelihood that the particle will tunnel through the barrier.
For a rectangular potential barrier, the transmission probability is given by:
$$ T(E) = e^{-2 \gamma L} $$
Where $\gamma$ is given by:
$$ \gamma = \frac{\sqrt{2m(V_0 - E)}}{\hbar} $$
$m$ is the mass of the particle.
$V_0$ is the potential barrier height.
$E$ is the energy of the particle.
$L$ is the width of the potential barrier.
$\hbar$ is the reduced Planck’s constant.
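As a quick sanity check with the constants used in the code below (an electron, $V_0 = 1.0 \times 10^{-18}$ J, $L = 1$ Å) and an assumed particle energy of half the barrier height, $E = 0.5 \times 10^{-18}$ J:

$$ \gamma = \frac{\sqrt{2 \times 9.11 \times 10^{-31} \times 0.5 \times 10^{-18}}}{1.055 \times 10^{-34}} \approx 9.1 \times 10^{9}\ \text{m}^{-1}, \qquad T = e^{-2\gamma L} \approx e^{-1.81} \approx 0.16 $$

So even at half the barrier height, an electron has an appreciable probability of tunneling through a 1 Å barrier.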
Python Implementation:
We will write Python code to compute the transmission probability for various particle energies and plot the result.
import numpy as np
import matplotlib.pyplot as plt
# Constants
hbar = 1.0545718e-34  # Reduced Planck's constant in Joules * seconds
m = 9.11e-31          # Mass of the electron in kg (approximate)
V0 = 1.0e-18          # Potential barrier height in Joules (approx 6 eV)
L = 1.0e-10           # Width of the potential barrier in meters (1 Å)
# Function to calculate tunneling transmission probability
def tunneling_probability(E, V0, L, m, hbar):
    if E >= V0:
        return 1  # Transmission taken as 1 when E >= V0: the particle clears the barrier classically
    gamma = np.sqrt(2 * m * (V0 - E)) / hbar
    return np.exp(-2 * gamma * L)
# Energies of the particle (from 0 to V0)
E_values = np.linspace(0, V0, 500)
# Calculate tunneling probabilities for each energy
T_values = [tunneling_probability(E, V0, L, m, hbar) for E in E_values]
# Plotting the results
plt.figure(figsize=(8, 6))
plt.plot(E_values / 1.60218e-19, T_values, label='Transmission Probability T(E)', color='b')
plt.xlabel("Energy (eV)")
plt.ylabel("Transmission Probability T(E)")
plt.title("Quantum Tunneling Effect: Transmission Probability vs Energy")
plt.grid(True)
plt.axhline(0, color='black', linewidth=1)
plt.axvline(V0 / 1.60218e-19, color='red', linestyle='--', label='V0 (Barrier Height)')
plt.legend()
plt.show()
Code Explanation:
Constants: We define the constants:
hbar: Reduced Planck’s constant (in Joules·seconds).
m: The mass of an electron in kg.
V0: The potential barrier height in Joules (1 eV is approximately $1.60218 \times 10^{-19}$ J).
L: The width of the potential barrier (in meters, 1 Ångström).
Tunneling Probability Function:
The function tunneling_probability(E, V0, L, m, hbar) computes the transmission probability $T(E)$ using the formula derived above.
If the particle’s energy $E$ is greater than or equal to the barrier height $V_0$, the particle can cross the barrier classically, so the function simply returns $T(E) = 1$ (no tunneling is involved).
Otherwise, the transmission probability is computed using the exponential decay formula.
Energy Range:
We generate a range of energy values from 0 to $V_0$. These energies represent possible particle energies below the barrier height.
Plotting:
The plot shows the transmission probability $T(E)$ as a function of energy.
The $x$-axis represents the energy of the particle in electron volts (eV).
The $y$-axis shows the transmission probability.
We highlight the potential barrier height $V_0$ using a vertical dashed red line.
Graphical Interpretation
As the energy of the particle approaches the barrier height $V_0$, the transmission probability increases.
For energies much lower than the barrier height, the probability decays exponentially, indicating that tunneling becomes very unlikely.
At energies closer to (but still below) $V_0$, the tunneling probability rises steeply as the decay constant $\gamma$ shrinks.
Graph Analysis
Transmission at low energies: For energies significantly lower than the barrier height, tunneling is highly unlikely, and $T(E)$ is close to 0.
Near the barrier height: As the energy gets closer to $V_0$, the tunneling probability increases due to the reduced decay rate $\gamma$.
Beyond the barrier height: If $E$ were greater than or equal to $V_0$, the transmission probability would be 1, indicating that the particle can easily pass through the barrier.
This model demonstrates how quantum tunneling occurs in a system where a particle has insufficient classical energy to overcome a potential barrier but still has a non-zero probability of passing through it due to the quantum nature of particles.
def calculate_magnetization(lattice):
    """Calculate the magnetization of the lattice."""
    return np.abs(np.sum(lattice)) / lattice.size
def simulate_ising(size, temperatures, steps):
    """Simulate the Ising model for a range of temperatures."""
    lattice = initialize_lattice(size)
    magnetizations = []

    for temp in temperatures:
        beta = 1 / temp
        for _ in range(steps):  # Equilibrate
            lattice = metropolis_step(lattice, beta)
        magnetization = calculate_magnetization(lattice)
        magnetizations.append(magnetization)

    return magnetizations
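The helper functions initialize_lattice and metropolis_step are not shown in this excerpt. A minimal sketch of what they could look like, assuming a 2D square lattice with periodic boundary conditions and coupling constant $J = 1$:

import numpy as np

def initialize_lattice(size):
    """Random spin configuration of +1/-1 on a size x size grid."""
    return np.random.choice([-1, 1], size=(size, size))

def metropolis_step(lattice, beta):
    """One Metropolis sweep: attempt one flip per lattice site on average."""
    n = lattice.shape[0]
    for _ in range(n * n):
        i, j = np.random.randint(0, n, size=2)
        # Sum of the four nearest neighbours (periodic boundaries)
        neighbours = (lattice[(i + 1) % n, j] + lattice[(i - 1) % n, j] +
                      lattice[i, (j + 1) % n] + lattice[i, (j - 1) % n])
        dE = 2 * lattice[i, j] * neighbours  # Energy change if spin (i, j) is flipped (J = 1)
        # Metropolis criterion: accept if dE <= 0, otherwise with probability exp(-beta * dE)
        if dE <= 0 or np.random.rand() < np.exp(-beta * dE):
            lattice[i, j] *= -1
    return lattice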
Initialization: The lattice is a grid of spins, initialized randomly.
Metropolis Algorithm: For each spin, we calculate the energy change (ΔE) if the spin is flipped. The flip is accepted based on the Metropolis criterion, which ensures proper statistical weighting.
Magnetization: The total magnetization of the system is recorded as the absolute sum of spins normalized by the number of spins.
Temperature Sweep: By sweeping through a range of temperatures, we observe how the magnetization changes, showing a sharp drop near the critical temperature ($T_c \approx 2.27$ in units of $J/k_B$ for the 2D Ising model).
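The driver code that performs the temperature sweep and produces the graph discussed below is not included in the excerpt; a minimal sketch, with illustrative lattice size, step count, and temperature grid:

import numpy as np
import matplotlib.pyplot as plt

# Illustrative simulation parameters
size = 20                                  # 20 x 20 lattice
steps = 200                                # Metropolis sweeps per temperature
temperatures = np.linspace(1.5, 3.5, 40)   # Range spanning the critical temperature

magnetizations = simulate_ising(size, temperatures, steps)

plt.plot(temperatures, magnetizations, 'o-')
plt.axvline(2.27, color='red', linestyle='--', label='T_c ≈ 2.27')
plt.xlabel('Temperature (J/k_B)')
plt.ylabel('Magnetization per spin')
plt.title('2D Ising Model: Magnetization vs Temperature')
plt.legend()
plt.grid(True)
plt.show()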
Result
The graph shows magnetization as a function of temperature.
Below the critical temperature $T_c$, the system exhibits spontaneous magnetization, indicating a ferromagnetic phase.
Above $T_c$, the magnetization drops to zero, showing a transition to the paramagnetic phase.