import math
# Constants
G = 6.67430e-11 # Universal gravitational constant (m^3 kg^-1 s^-2), CODATA 2018 value
G_EARTH = 9.81 # Acceleration due to gravity on Earth's surface (m/s^2)
KG_TO_LBS = 2.20462 # Conversion factor for mass/weight (pounds per kilogram)
KM_TO_M = 1000 # metres per kilometre
HR_TO_SEC = 3600 # seconds per hour
# Earth Data
RADIUS_EARTH_KM = 6378 # Earth's equatorial radius (km)
def vol_sphere(r_meters):
    """Return the volume (m^3) of a sphere with radius r_meters (m)."""
    radius_cubed = r_meters**3
    return (4 / 3) * math.pi * radius_cubed
def acc_gravity(m_kg, r_meters):
    """Surface gravity g = G*M / r^2 for mass m_kg (kg) at radius r_meters (m)."""
    r_squared = r_meters**2
    return G * m_kg / r_squared
def kepler_constant(t, r):
    """Kepler's Third Law ratio T^2 / r^3 for period t (s) and orbit radius r (m)."""
    period_sq = t**2
    radius_cubed = r**3
    return period_sq / radius_cubed
def calc_mass_star(k):
    """Host-star mass (kg) from Kepler constant k = T^2/r^3 (Newton's form of Kepler III)."""
    numerator = 4 * math.pi**2
    return numerator / (G * k)
def calc_linear_vel(t, r):
    """Circular orbital speed (m/s): circumference of the orbit divided by period t (s)."""
    circumference = 2 * math.pi * r
    return circumference / t
def centripetal_acc(v, r):
    """Centripetal acceleration a = v^2 / r for speed v (m/s) at radius r (m)."""
    speed_sq = v**2
    return speed_sq / r
def gravitational_pe(m_star, m_planet, r):
    """Gravitational potential energy U = -G*M*m/r (negative for a bound pair)."""
    magnitude = G * m_star * m_planet / r
    return -magnitude
def kinetic_energy(m, v):
    """Kinetic energy KE = (1/2) m v^2 for mass m (kg) at speed v (m/s)."""
    speed_sq = v**2
    return (1 / 2) * m * speed_sq
def total_orb_energy_func(m_star, m_planet, r):
    """Total mechanical energy of a circular orbit, E = -G*M*m / (2r) (half the PE)."""
    magnitude = G * m_star * m_planet / (2 * r)
    return -magnitude
def escape_vel_func(m_star, r):
    """Escape speed v = sqrt(2*G*M/r) at distance r (m) from mass m_star (kg)."""
    specific_energy = 2 * G * m_star / r
    return math.sqrt(specific_energy)
def sp_ang_momentum(v, r):
    """Specific (per-unit-mass) angular momentum h = v*r for a circular orbit (m^2/s)."""
    return r * v
def std_grav_parameter(m_star):
    """Standard gravitational parameter mu = G*M (m^3/s^2) for mass m_star (kg)."""
    mu = G * m_star
    return mu
# --- Planet Kepler-10b Data ---
name_planet = "Kepler-10b"
orbit_radius_km = 2.5e06  # orbital radius around the host star (km)
period_hrs = 20  # orbital period (hours)
# Kepler-10b is roughly 1.4 times the radius of Earth
radius_planet_m = (RADIUS_EARTH_KM * 1.4) * KM_TO_M  # use shared km->m constant instead of a bare 1000
density_planet = 8800 # kg/m3
# --- Basic Planet Stats ---
volume_planet = vol_sphere(radius_planet_m)
mass_planet = volume_planet * density_planet  # mass = volume * density
acc_gr_planet = acc_gravity(mass_planet, radius_planet_m)
# Weight Calculation for a Human
mass_human_kg = 68
wt_on_earth_lbs = mass_human_kg * KG_TO_LBS
# Weight scales linearly with local surface gravity
wt_on_planet_lbs = wt_on_earth_lbs * (acc_gr_planet / G_EARTH)
# --- Orbital Dynamics ---
orb_radius_m = orbit_radius_km * KM_TO_M
period_secs = period_hrs * HR_TO_SEC
# Core orbital calculations
kepler_star_val = kepler_constant(period_secs, orb_radius_m)
mass_of_star = calc_mass_star(kepler_star_val)
v_orbit = calc_linear_vel(period_secs, orb_radius_m)
# Derived parameters
centri_acc = centripetal_acc(v_orbit, orb_radius_m)
gpe = gravitational_pe(mass_of_star, mass_planet, orb_radius_m)
ke_planet_val = kinetic_energy(mass_planet, v_orbit)
tot_energy = total_orb_energy_func(mass_of_star, mass_planet, orb_radius_m)
esc_vel = escape_vel_func(mass_of_star, orb_radius_m)
sp_ang_mom = sp_ang_momentum(v_orbit, orb_radius_m)
mu_param = std_grav_parameter(mass_of_star)
# --- Output Results ---
print("-" * 45)
print(f"PHYSICS ANALYSIS: {name_planet}")
print("-" * 45)
print(f"Volume of {name_planet} : {volume_planet:.2e} m3")
print(f"Mass of {name_planet} : {mass_planet:.2e} kg")
print(f"Surface Gravity (g) : {acc_gr_planet:.2f} m/s2")
print(f"Relative Gravity (vs Earth): {acc_gr_planet / G_EARTH:.2f}x")
print("-" * 45)
print(f"Weight of {mass_human_kg}kg person on Earth : {wt_on_earth_lbs:.2f} lbs")
print(f"Weight of {mass_human_kg}kg person on {name_planet}: {wt_on_planet_lbs:.2f} lbs")
print("-" * 45)
print(f"Orbital Velocity (v) : {v_orbit/1000:.2f} km/s")
print(f"Kepler's Constant : {kepler_star_val:.2e} s2/m3")
print(f"Mass of parent star : {mass_of_star:.2e} kg")
print(f"Centripetal Acceleration : {centri_acc:.2f} m/s2")
print(f"Gravitational PE : {gpe:.2e} J")
print(f"Kinetic Energy (Planet) : {ke_planet_val:.2e} J")
print(f"Total Orbital Energy : {tot_energy:.2e} J")
print(f"Escape Velocity : {esc_vel/1000:.2f} km/s")
print(f"Specific Ang. Momentum : {sp_ang_mom:.2e} m2/s")
print(f"Standard Grav. Parameter : {mu_param:.2e} m3/s2")
print("-" * 45)
"Computational solutions for the curious mind. Exploring the intersection of Python programming and rigorous science, featuring solved problems in NASA physics, chemistry, and beyond."
Monday, 30 March 2026
Life on a Lava World: Decoding the Physics of Kepler-10b
Saturday, 28 March 2026
The Power of the Point: Decoding Earthquake Magnitudes Through Code
Yesterday, we took a deep dive into the math behind the ground shaking beneath our feet. While we often hear numbers like "7.0" or "8.0" on the news, those small decimals hide a staggering reality of exponential growth.
Using Python and numpy, we mapped out exactly how much the world moves—and how much energy is released—when the Richter scale ticks upward.
The Logarithmic Reality
The Richter scale isn't linear; it’s logarithmic. This means a magnitude 8.0 isn't just "a bit stronger" than a 7.0—each whole-number step represents ten times the ground displacement and roughly a 31.6-fold (10^1.5) leap in released energy.
import numpy as np
def ground_movement(r):
    """Relative ground displacement (m) for Richter magnitude r: 1 micron scaled by 10^r."""
    scale_factor = 10**r
    return 1e-06 * scale_factor
def energy_produced(r):
    """Approximate energy release (tons of TNT) for Richter magnitude r."""
    exponent = 1.5 * (r - 3.5)
    return 3 * 10**exponent
# Reference events and their Richter magnitudes
# (NOTE: 'ritcher' is a misspelling of 'Richter'; name kept as-is)
ritcher_values = {
    "Hand grenade": 0.2,
    "1 stick dynamite": 1.2,
    "Chernobyl": 3.9,
    "2010 Quebec": 5.0,
    "2011 Washington": 5.8,
    "2010 Haiti": 7.0,
    "1906 San Francisco": 8.0,
    "1883 Krakatoa": 8.8,
    "1964 Anchorage": 9.2,
    "Chicxulub Impact": 12.6
}
# Converting to numpy arrays for vectorized calculations
events = np.array(list(ritcher_values.keys()))
r_magnitudes = np.array(list(ritcher_values.values()))
# Both helpers broadcast element-wise over the magnitude array
grnd_movement = ground_movement(r_magnitudes)
energy = energy_produced(r_magnitudes)
# Formatting the Output Table
print(f"{'Event':<20} | {'R':>5} | {'Grnd Move (m)':>15} | {'Energy (Tons TNT)':>18}")
print("-" * 65)
for ev, rm, gm, eng in zip(events, r_magnitudes, grnd_movement, energy):
    # Using scientific notation for ground movement and energy due to the massive range
    print(f"{ev:<20} | {rm:>5.1f} | {gm:>15.2e} | {eng:>18.2e}")
Earthquakes remind us that nature doesn't operate on a simple 1-to-10 scale. It operates on a curve that turns a ripple into a mountain-mover in just a few decimal points.
Friday, 27 March 2026
Mapping the Invisible: Visualizing Earth’s Magnetosphere with Python
import numpy as np
import matplotlib.pyplot as plt
# --- PHYSICS CALCULATIONS ---
def get_electron_density(freq_hz):
    """
    Invert the plasma-frequency relation f_p ≈ 9000 * sqrt(Ne).

    Given a reflection frequency freq_hz (Hz), return the electron
    density Ne in electrons per cubic centimeter (e-/cc).
    """
    ratio = freq_hz / 9000
    return ratio**2
def get_cartesian_coords(dist_re, angle_deg):
    """Convert polar satellite data (distance in Earth radii, angle in degrees) to X-Y."""
    theta = np.radians(angle_deg)
    x = dist_re * np.cos(theta)
    y = dist_re * np.sin(theta)
    return x, y
# --- DATASET (Sample RPI Echoes) ---
# "ID": (Angle, Distance_RE, Reflection_Freq_Hz)
plasma_data = {
    "1": (300, 1.0, 284000), "2": (315, 2.5, 201000), "3": (350, 6.5, 12600),
    "4": (45, 4.5, 20100), "5": (60, 3.9, 25500), "6": (90, 4.1, 28500),
    "7": (120, 4.0, 25500), "8": (135, 5.5, 20100), "9": (215, 7.2, 12600),
    "10": (230, 3.5, 220000), "11": (270, 1.2, 348000)
}
# --- PROCESSING ---
# Convert each echo to Cartesian coordinates and an electron density value.
x, y, densities = [], [], []
for loc, (ang, dist, freq) in plasma_data.items():
    # loc (the echo ID) is not used below; only the tuple values matter
    nx, ny = get_cartesian_coords(dist, ang)
    x.append(nx); y.append(ny)
    densities.append(get_electron_density(freq))
# --- VISUALIZATION ---
plt.figure(figsize=(10, 7))
# Red = High Density (Near Earth), Blue = Low Density (Outer Space)
# Marker size also scales with density (s=density/5) to reinforce the color cue.
scatter = plt.scatter(x, y, c=densities, s=[d/5 for d in densities],
                      cmap='coolwarm', edgecolors='black', alpha=0.8)
# Add Earth for reference (unit circle, since distances are in Earth radii)
earth = plt.Circle((0, 0), 1, color='skyblue', label='Earth')
plt.gca().add_patch(earth)
plt.colorbar(scatter, label='Electron Density (e-/cc)')
plt.title("Visualizing Earth's Magnetosphere via RPI Sounding")
plt.xlabel("Distance ($R_E$)"); plt.ylabel("Distance ($R_E$)")
plt.axis('equal'); plt.grid(True, alpha=0.3)
plt.show()
By visualizing this data, we can clearly see the boundary known as the plasmapause—the dramatic drop-off where the dense, Earth-bound plasma gives way to the vast, sparse vacuum of the outer magnetosphere. The code we’ve built doesn't just plot points; it provides a window into the dynamic environment that protects our satellites and power grids from solar storms. As the data shows, the closer we get to Earth, the more crowded the neighborhood becomes! Whether you're a space enthusiast or a data scientist, mapping the invisible reminds us that even "empty" space is teeming with activity.
Thursday, 26 March 2026
Detecting the Invisible: A Python Simulation of Dark Matter
The "Missing Mass" Problem When astronomers use the Hubble Space Telescope to observe massive galaxy clusters, they encounter a startling discrepancy. By counting the stars and galaxies, we can calculate the Visible Mass (M_vis). However, when we measure the speed at which these galaxies orbit the cluster's center, they move far faster than the visible gravity should allow. This script uses the Orbital Velocity formula to reveal the "Mass Gap." According to the laws of gravitation, if a cluster is stable and not flying apart, the mass required to hold it together must be M = v²R / G, where v is the orbital velocity, R is the cluster radius, and G is the universal gravitational constant. The difference between this required mass and the calculated visible mass is the physical proof of Dark Matter. The Python Implementation
import math
# Constants
G = 6.672e-11 # Universal gravitational constant (m^3 kg^-1 s^-2); older rounding of 6.674e-11
LY_TO_M = 9.4e15 # metres per light-year (approximate; exact value is ~9.461e15)
AVG_STAR_MASS = 2e30 # kg — roughly one solar mass
def get_velocity(m, r):
    """Escape velocity (m/s) at radius r (m) from mass m (kg): v = sqrt(2GM/r)."""
    specific = 2 * G * m / r
    return math.sqrt(specific)
def get_mass_from_vel(v_kms, r_m):
    """Mass (kg) for which the escape velocity at radius r_m (m) equals v_kms (km/s)."""
    v_ms = v_kms * 1000  # Convert km/s to m/s
    numerator = v_ms**2 * r_m
    return numerator / (2 * G)
# Typical cluster data
clusters = {
    # nm: (galaxies, stars/gal, rad_ly, observed_vel_km_s)
    "A": (350, 10e09, 5e06, 300),
    "B": (1000, 10e09, 10e06, 140),
}
results = {}
for name, (gl, st, r_ly, act_vel) in clusters.items():
    # 1. Calculate Visible Mass (galaxies * stars per galaxy * average star mass)
    mass_visible = gl * st * AVG_STAR_MASS
    radius_m = r_ly * LY_TO_M
    # 2. Calculate Theoretical Velocity (based on stars only)
    calc_vel = get_velocity(mass_visible, radius_m) / 1000 # convert to km/s
    # 3. Calculate Mass required to reach observed velocity
    mass_required = get_mass_from_vel(act_vel, radius_m)
    # 4. Find the difference (The "Dark Matter" or "Missing Mass")
    mass_diff = mass_required - mass_visible
    # Express the gap as an equivalent number of average stars
    star_diff = mass_diff / AVG_STAR_MASS
    results[name] = {
        "visible_mass": mass_visible,
        "radius": radius_m,
        "calc_vel": calc_vel,
        "obs_vel": act_vel,
        "mass_diff": mass_diff,
        "star_diff": star_diff
    }
# --- Output Results ---
for nm, data in results.items():
    print(f"--- Galaxy Cluster: {nm} ---")
    print(f"Visible Mass: {data['visible_mass']:.2e} kg")
    print(f"Radius: {data['radius']:.2e} m")
    print(f"Calculated Vel: {data['calc_vel']:.2f} km/s")
    print(f"Observed Vel: {data['obs_vel']:.2f} km/s")
    # Positive gap => more mass is needed than we can see (dark matter);
    # negative gap => visible mass alone over-predicts the observed velocity.
    if data['mass_diff'] > 0:
        print(f"STATUS: Underestimated! Missing gravity detected.")
        print(f"DARK MATTER REQ: {data['mass_diff']:.2e} kg ({data['star_diff']:.2e} stars worth)")
    elif data['mass_diff'] < 0:
        print(f"STATUS: Overestimated! Cluster has too much visible mass.")
        print(f"MASS TO SHED: {abs(data['mass_diff']):.2e} kg ({abs(data['star_diff']):.2e} stars worth)")
    else:
        print("STATUS: Perfectly balanced (No Dark Matter needed).")
    print()
Please like this code. Everything is free to use
Tuesday, 24 March 2026
Universe is Really Mind Boggling
"""
PROJECT: ESTIMATING THE TOTAL GALAXIES IN THE OBSERVABLE UNIVERSE
DATA SOURCE: NASA Hubble eXtreme Deep Field (XDF)
The XDF is a tiny 'keyhole' view of the sky. By calculating how many
of these 'keyholes' fit into the entire sky, we can estimate the total
number of galaxies in existence.
"""
import math
# --- 1. CONVERSION CONSTANTS ---
# Geometry of the sky is measured in arcminutes and arcseconds.
# 1 degree = 60 arcminutes. This is essential for converting tiny telescope
# patches into standard degrees.
ONE_DEGREE = 60 # arcminutes per degree
ONE_ARCMIN = 60 # arcseconds per arcminute (kept for reference; unused below)
# --- 2. THE XDF PATCH DIMENSIONS ---
# The XDF patch is incredibly small: 2.3 x 2.0 arcminutes.
# For context, the Full Moon is about 31 arcminutes wide!
width_arcmins, height_arcmins = 2.3, 2.0
# Convert arcminutes to decimal degrees so we can calculate area
width_degs = width_arcmins / ONE_DEGREE
height_degs = height_arcmins / ONE_DEGREE
print(f"XDF Patch Width: {width_degs:.4f}°")
print(f"XDF Patch Height: {height_degs:.4f}°")
# Area of a rectangle (on a small scale) = width * height
area_patch = width_degs * height_degs
print(f"Area of XDF Patch: {area_patch:.7f} sq. degrees")
# --- 3. CALCULATING THE TOTAL SURFACE AREA OF THE SKY ---
# Space is a sphere surrounding the Earth. The surface area of a sphere is 4πr².
# To get square degrees, we treat the 'radius' as 180/π degrees.
radius_in_degrees = 180 / math.pi
# The total area of the entire sky (sphere) in square degrees
# This should result in approximately 41,253 sq. deg.
area_sky = 4 * math.pi * (radius_in_degrees**2)
print(f"Total Area of Sky: {area_sky:,.2f} sq. degrees")
# --- 4. SCALING UP ---
# How many XDF-sized 'tiles' would it take to cover the whole sky?
num_patches = area_sky / area_patch
print(f"Total XDF patches needed to cover sky: {num_patches:,.0f}")
# Based on the XDF data, there are roughly 5,500 galaxies in this one tiny tile.
galaxies_per_patch = 5500
total_universe_galaxies = galaxies_per_patch * num_patches
print(f"\nESTIMATED TOTAL GALAXIES IN SKY: {total_universe_galaxies:,.0f}")
print("-" * 65)
# --- 5. THE COSMIC TIME MACHINE (LOOKBACK TIME) ---
# Because light takes time to travel, looking further away is looking
# deeper into the past.
galaxies_distribution = {
    10: "Seen as they were < 5 Billion Years ago",
    30: "Seen as they were 5 to 9 Billion Years ago",
    60: "Seen as they were > 9 Billion Years ago (Ancient Universe)"
}
# NOTE: the two prints below were HTML-mangled in the published page;
# they are reconstructed to match the aligned-table style used above.
print(f"{'COSMIC AGE DISTRIBUTION':<45}{'COUNT':>15}")
print("-" * 65)
for pct, description in galaxies_distribution.items():
    # Calculating the subset of the total population
    count = (pct / 100) * total_universe_galaxies
    print(f"{description:<45}{count:>15,.0f}")
If you like it, please encourage me. My name is Ranjit Singh. I retired from ONGC as a scientist. Monday, 23 March 2026
Cracking the Cosmic Code: Fermi’s Latest Gamma-Ray Map
import numpy as np
import matplotlib.pyplot as plt
# --- Initial Data from Fermi/LAT Survey ---
# Source: Second catalog of gamma-ray point-sources (1,873 total)
initial_sources = {
    "Blazar Galaxy": 1069,
    "Pulsars": 115,
    "Supernovae": 77,
    "Active Galaxies": 20,
    "Normal Galaxies & Stars": 20,
    "Unknown Objects": 572
}
# --- Data Preparation for Initial Visualization ---
categories = list(initial_sources.keys())
counts = np.array(list(initial_sources.values()))
total_initial = np.sum(counts)
# Print initial summary report using f-strings for alignment
print(f"{'--- INITIAL FERMI/LAT DATA ---':^45}")
print(f"{'Source Category':<30} | {'Initial Count':>10}")
print("-" * 45)
for category, count in initial_sources.items():
    print(f"{category:<30} | {count:>10}")
# Generate Initial Pie Chart
fig1, ax1 = plt.subplots(figsize=(10, 7))
ax1.pie(counts, labels=categories, autopct='%1.1f%%', startangle=140)
ax1.set_title(f"Initial Distribution of Fermi/LAT Sources (Total: {total_initial})")
plt.show()
# --- Suzaku Extrapolation Logic ---
# Suzaku studied a sample of 11 of the 572 unidentified sources
suzaku_sample_size = 11
unknown_pool = initial_sources["Unknown Objects"]
# Suzaku's specific findings from that sample
# (6 + 1 + 1 identified; 3 of the 11 remained unknown)
suzaku_findings = {
    "Pulsars": 6,
    "Blazar Galaxy": 1,
    "Normal Galaxies & Stars": 1,
    "Unknown Objects": 3
}
# Create a copy to store updated numbers without mutating the original
updated_sources = initial_sources.copy()
# Recalculate distribution based on Suzaku proportions
for category, sample_count in suzaku_findings.items():
    if category != "Unknown Objects":
        # Proportionally reassign unknown objects to identified categories
        extrapolated_count = (sample_count / suzaku_sample_size) * unknown_pool
        # Add extrapolated count to existing category
        updated_sources[category] += extrapolated_count
        # Deduct the same amount from the "Unknown Objects" pool
        updated_sources["Unknown Objects"] -= extrapolated_count
# --- Final Updated Report ---
# The grand total is preserved: mass is only moved between categories.
print(f"\n{'--- EXTRAPOLATED SUZAKU UPDATES ---':^45}")
print(f"{'Source Category':<30} | {'Updated Count':>10}")
print("-" * 45)
for category, count in updated_sources.items():
    # Use f-strings to round counts to 0 decimal places for a clean display
    print(f"{category:<30} | {count:>10.0f}")
# Generate Updated Pie Chart
updated_counts = np.array(list(updated_sources.values()))
fig2, ax2 = plt.subplots(figsize=(10, 7))
ax2.pie(updated_counts, labels=categories, autopct='%1.1f%%', startangle=140)
ax2.set_title(f"Extrapolated Distribution After Suzaku Follow-up")
plt.show()
Sunday, 22 March 2026
Is Space "Lumpy"? Probing Quantum Gravity with Python
This Python script explores a frontier of modern physics known as Lorentz Invariance Violation (LIV), which investigates whether the "smooth" fabric of Einstein's spacetime becomes "lumpy" or "foamy" at incredibly small scales. By calculating the arrival time differences of gamma rays from distant cosmic sources like quasars, the code determines a characteristic length scale for this spatial graininess. While classical physics assumes all light travels at exactly $c$ in a vacuum, quantum gravity theories suggest that higher-energy photons might "bump" into the quantum foam of space, causing a measurable lag over billions of light-years.
The Python Implementation
To ensure Google and other search engines can "read" this code, I have embedded it as text within a preformatted block. This allows the logic to be searchable while remaining easy to read.
# Constants
C = 29979245800.0 # Speed of light in cm/sec (CGS units)
LY_TO_CM = 9.46073e17 # 1 Light Year in cm
PLANCK_LENGTH = 1.616e-33 # Theoretical "quantum" grain size of spacetime (cm)
NUCLEUS_SIZE = 1e-13 # Average size of an atomic nucleus (cm)
def analyze_space_grain(delta_t, distance, unit, wl_diff_cm):
    """
    Estimate the spatial "lumpiness" scale L (cm) implied by a photon
    arrival-time delay.

    delta_t    -- observed arrival-time difference (s)
    distance   -- source distance; in light-years when unit is "ly"/"light year",
                  otherwise assumed to already be in cm
    unit       -- distance unit label
    wl_diff_cm -- wavelength difference between the two photons (cm)
    """
    if unit.lower() in ["ly", "light year"]:
        dist_cm = distance * LY_TO_CM
    else:
        dist_cm = distance
    travel_term = delta_t * C * wl_diff_cm
    return travel_term / dist_cm
# Dataset of Cosmic Observations
# "Name": (time delay s, wavelength difference cm, distance, unit)
rays = {
    "Quasar": (1.0, 1e-9, 5e9, "ly"),
    "Distant Object": (0.7, 4e-12, 10e9, "ly")
}
print(f"{'Source':<18} | {'Lumpiness (cm)':<15} | {'Classification'}")
print("-" * 65)
for name, (td, wld, d, u) in rays.items():
    lump = analyze_space_grain(td, d, u, wld)
    # Classify the derived grain size against known physical scales
    if lump > NUCLEUS_SIZE:
        status = "Large (Nuclear Scale)"
    elif lump > 1e-20:
        status = "Sub-Nuclear Scale"
    elif lump > PLANCK_LENGTH:
        status = "Quantum Scale"
    else:
        status = "Smooth (Planck Limit)"
    print(f"{name:<18} | {lump:.2e} cm | {status}")
Physics Significance
By comparing our results to the size of an atomic nucleus or the theoretical Planck Length, we can set experimental "upper limits" on the smoothness of the universe. If our observations show almost no time delay, it proves that spacetime remains perfectly smooth even at sub-particle dimensions.
Friday, 20 March 2026
Applications of Elemental Properties
Thursday, 19 March 2026
The Physics of a Gas Mixture: Analyzing the Air We Breathe
Python: The Physics of a Gas Mixture
Author: [Ranjit Singh]
# Molar Volume at STP (Standard Temp and Pressure) in cc/mol
MOLAR_VOLUME = 22400
# { "Name": (Volume %, Molecular Weight) }
COMPOSITION = {
    "Nitrogen, N2": (78.08, 28.013),
    "Oxygen, O2": (20.95, 31.998),
    "Argon, Ar": (0.93, 39.948),
    "Carbon dioxide, CO2": (0.031, 44.009),
    "Hydrogen, H2": (5.9e-03, 2.016),
    "Neon, Ne": (1.8e-03, 20.180),
    "Helium, He": (5.2e-04, 4.003),
    "Methane, CH4": (2.0e-04, 16.043),
    "Krypton, Kr": (1.1e-04, 83.798),
    "Nitric oxide, NO": (5.0e-05, 30.006),
    "Xenon, Xe": (8.7e-06, 131.29),
    "Ozone, O3": (7.0e-06, 47.998)
}
# Running totals: volume percent and mass per 100 cc of air
sum_vol = 0
sum_mass = 0
mass_dict = {}
# Header for the detailed output
print(f"{'Component':<22} {'Vol %':<10} {'Mass (g/100cc)'}")
print("-" * 52)
for gas, (vol, mlwt) in COMPOSITION.items():
    sum_vol += vol
    # Calculate mass of each component in 100cc of air
    # Logic: (Molar_Mass / Molar_Volume) * Vol %
    mass = (mlwt / MOLAR_VOLUME) * vol
    sum_mass += mass
    mass_dict[gas] = mass
    print(f"{gas:<22} {vol:<10} {mass:.8f}")
# Average Molecular Weight calculation
# Avg_MLWT = (Total Mass * Molar_Volume) / Volume Sample
avg_mlwt = (sum_mass * MOLAR_VOLUME) / 100
# Density (mass / volume); sum_vol is ~100, so this is g/cc
density = sum_mass / sum_vol
print("\n" + "="*45)
print(f"Total Volume Checked: {sum_vol:.2f}%")
print(f"Avg Mol Weight of Air: {avg_mlwt:.2f} g/mol")
print(f"Density of Air: {density:.5e} g/cc")
print("="*45 + "\n")
# Percentage by Mass calculation
print(f"{'Component':<22} {'% By Mass'}")
print("*" * 35)
for g, m in mass_dict.items():
    m_percent = (m / sum_mass) * 100
    print(f"{g:<22} {m_percent:>10.5f}%")
For any students reading, the constant 22,400 is derived from the Ideal Gas Law (PV = nRT) for a single mole (n=1) at 273.15K and 1 atm.
In real-world exploration at ONGC, we knew that conditions were rarely "standard," but this script provides the necessary logical framework. We could easily modify this code to calculate the density at a different pressure (like at the bottom of a well) or even account for humidity.
If you found this useful, my next post will look at solving the Ideal Gas Law using Python and handling common unit conversions automatically.
Visualizing Particle Kinematics with Python
Understanding the relationship between position, velocity, and acceleration is fundamental to physics. However, seeing how these variables e...
-
The important source of aerosols in the stratosphere is the formation of carbonyl sulfide (COS) droplets. These droplets are more dangerou...
-
This code in python demonstrates how to calculate molecular weights, physical properties like mole fractions, partial pressure calculations ...
-
The code is based on finding volume, density and orbit radius of some dwarf planets. Further we tried to match these densities with one'...




