import networkx as nx
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from collections import defaultdict

def deep_understanding(text):
    """
    Break text into concept tokens and return a weighted concept map.

    Tokenizes *text* with NLTK's ``word_tokenize``, keeps alphabetic tokens
    that are not English stopwords, and scores every pair of distinct token
    positions with the set-overlap ratio ``|A & B| / (|A| + |B|)`` over the
    whitespace-split words of each concept.

    NOTE(review): every concept here is a single word (``word_tokenize``
    output filtered by ``.isalpha()``), so ``c.split()`` yields a
    one-element set and the score degenerates to 0.5 for repeated words
    and 0.0 otherwise. A phrase-level concept extractor (or a Jaccard
    score over context windows) may have been intended — confirm against
    callers before changing the returned values.

    Args:
        text (str): Input text to be processed.

    Returns:
        dict: ``{concept: {other_concept: similarity}}`` nested mapping;
        empty when the text contains no qualifying tokens.
    """
    # The original built an ``nx.Graph()`` here and never used it — removed.
    tokens = word_tokenize(text)
    stop_words = set(stopwords.words('english'))
    concepts = [t for t in tokens if t.isalpha() and t.lower() not in stop_words]

    # Hoist the per-concept word sets out of the O(n^2) pair loop; the
    # original rebuilt both sets (three constructions) on every iteration.
    word_sets = [set(c.split()) for c in concepts]

    concept_graph = defaultdict(dict)
    for i, c1 in enumerate(concepts):
        s1 = word_sets[i]
        for j, c2 in enumerate(concepts):
            if i == j:
                # Skip the same token *position*; a repeated word at a
                # different position still gets scored (matches original).
                continue
            s2 = word_sets[j]
            # Denominator is never zero: concepts are non-empty alphabetic tokens.
            concept_graph[c1][c2] = len(s1 & s2) / (len(s1) + len(s2))

    return dict(concept_graph)