// The code copy is as follows:
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.net.*;
import java.io.*;
import java.util.regex.*;
/*
Obtain the specified rule by constructing regular expressions
*/
/**
 * Simple web-crawler experiment: downloads a page, narrows it to the region
 * between two marker strings, and extracts anchor tags (URL + title) using
 * regular expressions. Results are printed to standard output.
 */
public class Urls
{
    private String startUrl;                     // URL the collection starts from
    String urlContent;                           // raw HTML downloaded from startUrl (lines concatenated)
    String ContentArea;                          // slice of urlContent between strAreaBegin and strAreaEnd
    private String strAreaBegin, strAreaEnd;     // markers delimiting the region to scan
    private String stringInUrl, stringNotInUrl;  // keyword filters (stored but not yet applied anywhere)
    String strContent;                           // collected content (currently unused)
    String[] allUrls;                            // all URLs collected (currently unused)
    private String regex;                        // the anchor-matching rule last used by Urls()
    UrlAndTitle urlAndTitle = new UrlAndTitle(); // holder for one URL/title pair

    public static void main(String[] args)
    {
        Urls myurl = new Urls("<body", "/body>");
        myurl.getStartUrl("http://www.zuzwn.com/");
        myurl.getUrlContent();
        myurl.getContentArea();
        myurl.getStartUrl("http://www.zuzwn.com/");
        myurl.getStringNotInUrl("google");
        myurl.Urls();
        //System.out.println("startUrl:"+myurl.startUrl);
        //System.out.println("urlcontent:"+myurl.urlContent);
        //System.out.println("ContentArea:"+myurl.ContentArea);
    }

    /**
     * Creates a collector whose scanning region is delimited by the given
     * begin and end marker strings.
     *
     * @param strAreaBegin marker that starts the collection area
     * @param strAreaEnd   marker that ends the collection area
     */
    public Urls(String strAreaBegin, String strAreaEnd)
    {
        this.strAreaBegin = strAreaBegin;
        this.strAreaEnd = strAreaEnd;
    }

    /**
     * Scans ContentArea for anchor elements and prints, for each match, the
     * raw tag, its link text ("Title:") and its href target ("Website:"),
     * followed by a total count. Must be called after getContentArea() (or
     * after ContentArea has been set some other way).
     */
    public void Urls()
    {
        int i = 0;
        // Remember the rule that was applied so getRegex() reflects it
        // (previously the field was never assigned and getRegex() returned null).
        this.regex = "<a.*?/a>";
        Pattern pt = Pattern.compile(this.regex);
        // Compile the inner patterns once, not once per anchor match.
        Pattern titlePattern = Pattern.compile(">.*?</a>");
        Pattern urlPattern = Pattern.compile("href=.*?>");
        Matcher mt = pt.matcher(ContentArea);
        while (mt.find()) // fixed: original "While" (capital W) does not compile
        {
            System.out.println(mt.group());
            i++;
            // Extract the link text between '>' and '</a>'.
            Matcher title = titlePattern.matcher(mt.group());
            while (title.find())
            {
                System.out.println("Title:" + title.group().replaceAll(">|</a>", ""));
            }
            // Extract the href attribute value.
            Matcher myurl = urlPattern.matcher(mt.group());
            while (myurl.find())
            {
                System.out.println("Website:" + myurl.group().replaceAll("href=|>", ""));
            }
            System.out.println();
        }
        System.out.println("There are totals" + i + "compliant results");
    }

    /** Setter (despite the "get" name) for the URL collection starts from. */
    public void getStartUrl(String startUrl)
    {
        this.startUrl = startUrl;
    }

    /**
     * Downloads startUrl and stores the page text in urlContent, with line
     * breaks removed (lines are concatenated). On any failure the error is
     * reported to stdout/stderr and urlContent is left unchanged.
     */
    public void getUrlContent()
    {
        StringBuffer is = new StringBuffer();
        // try-with-resources closes the reader even on failure
        // (the original leaked the stream).
        try (BufferedReader br = new BufferedReader(
                new InputStreamReader(new URL(startUrl).openStream())))
        {
            String s;
            while ((s = br.readLine()) != null)
            {
                is.append(s);
            }
            urlContent = is.toString();
        }
        catch (Exception e)
        {
            System.out.println("url file failed to output");
            e.printStackTrace();
        }
    }

    /**
     * Slices urlContent to the text strictly after strAreaBegin and before the
     * next occurrence of strAreaEnd, storing it in ContentArea.
     * NOTE(review): assumes both markers are present — if either indexOf fails
     * this throws StringIndexOutOfBoundsException, as in the original.
     */
    public void getContentArea()
    {
        int pos1 = 0, pos2 = 0;
        pos1 = urlContent.indexOf(strAreaBegin) + strAreaBegin.length();
        pos2 = urlContent.indexOf(strAreaEnd, pos1);
        ContentArea = urlContent.substring(pos1, pos2);
    }

    // The following two setters record the keyword a URL must contain and the
    // keyword it must not contain. Preliminary only: later there should be more
    // than one keyword of each kind, and the filters should actually be applied.

    /** Setter (despite the "get" name) for the required URL keyword. */
    public void getStringInUrl(String stringInUrl)
    {
        this.stringInUrl = stringInUrl;
    }

    /** Setter (despite the "get" name) for the forbidden URL keyword. */
    public void getStringNotInUrl(String stringNotInUrl)
    {
        this.stringNotInUrl = stringNotInUrl;
    }

    /** Placeholder: intended to collect URLs according to the rules. */
    public void getUrl()
    {
    }

    /** Returns the collection rule last used by Urls(), or null if not yet run. */
    public String getRegex()
    {
        return regex;
    }

    /** Value holder pairing a collected URL with its link title. */
    class UrlAndTitle
    {
        String myURL;
        String title;
    }
}